dmabuf.c

/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "gvt.h"

#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

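/*
 * get_pages hook for the proxy GEM object: build a scatterlist that points
 * at the guest framebuffer by decoding one GGTT PTE per page of the plane.
 */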
static int vgpu_gem_get_pages(
		struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int i, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;

	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (WARN_ON(!fb_info))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}
	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
		(fb_info->start >> PAGE_SHIFT);
	for_each_sg(st->sgl, sg, fb_info->size, i) {
		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_address(sg) =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		sg_dma_len(sg) = PAGE_SIZE;
	}

	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);

	return 0;
}

static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

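/*
 * Final kref release: if the vgpu is still active, unlink the dmabuf_obj
 * from its list, drop the vfio device reference and free it; otherwise the
 * object is already orphaned and only its memory needs to be freed.
 */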
static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = container_of(pos,
					struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				intel_gvt_hypervisor_put_vfio_device(vgpu);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
				list_del(pos);
				break;
			}
		}
	} else {
		/* Free the orphan dmabuf_objs here */
		kfree(obj->info);
		kfree(obj);
	}
}

static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}

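/* GEM release hook: drop the dmabuf_obj reference held by the GEM object. */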
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}

static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};

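/*
 * Wrap the decoded guest framebuffer in a proxy GEM object and derive the
 * tiling mode and stride from the DRM format modifier.
 */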
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		info->size << PAGE_SHIFT);
	i915_gem_object_init(obj, &intel_vgpu_gem_ops);

	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;
	if (IS_SKYLAKE(dev_priv)
		|| IS_KABYLAKE(dev_priv)
		|| IS_BROXTON(dev_priv)) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod) {
		case DRM_FORMAT_MOD_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case I915_FORMAT_MOD_X_TILED:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
				     info->drm_format_mod);
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}

static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
	if (c && c->x_hot <= c->width && c->y_hot <= c->height)
		return true;
	else
		return false;
}

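/*
 * Decode the requested plane (primary or cursor) and sanity-check the
 * resulting framebuffer: non-zero size, page-aligned start address and a
 * range that fits inside the vGPU's GGTT.
 */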
static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret;

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;

		switch (p.tiled) {
		case PLANE_CTL_TILED_LINEAR:
			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
			break;
		case PLANE_CTL_TILED_X:
			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
			break;
		case PLANE_CTL_TILED_Y:
			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
			break;
		case PLANE_CTL_TILED_YF:
			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
			break;
		default:
			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
		}

		info->size = (((p.stride * p.height * p.bpp) / 8) +
			      (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		if (validate_hotspot(&c)) {
			info->x_hot = c.x_hot;
			info->y_hot = c.y_hot;
		} else {
			info->x_hot = UINT_MAX;
			info->y_hot = UINT_MAX;
		}

		info->size = (((info->stride * c.height * c.bpp) / 8)
				+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}
	if (((info->start >> PAGE_SHIFT) + info->size) >
		ggtt_total_entries(&dev_priv->ggtt)) {
		gvt_vgpu_err("Invalid GTT offset or size\n");
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}

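/* Find an already exposed dmabuf_obj whose decoded plane info matches. */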
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		    struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if ((dmabuf_obj == NULL) ||
		    (dmabuf_obj->info == NULL))
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if (!dmabuf_obj)
			continue;

		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
		      struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}

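/*
 * Handle a VFIO graphics plane query: reuse a matching dmabuf_obj if one is
 * already exposed, otherwise allocate a new one and hold an initial
 * reference until the corresponding get_dmabuf call.
 */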
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				      VFIO_GFX_PLANE_TYPE_PROBE))
		return ret;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
			(!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
				  gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If exists, pick up the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/* This buffer may be released between query_plane ioctl and
		 * get_dmabuf ioctl. Add the refcount to make sure it won't
		 * be released between the two ioctls.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}
		ret = 0;
		gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	mutex_lock(&vgpu->dmabuf_lock);
	if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
		gvt_vgpu_err("get vfio device failed\n");
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out_free_info;
	}
	mutex_unlock(&vgpu->dmabuf_lock);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means plane isn't ready, which might be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}

/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}

	i915_gem_object_put(obj);

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

	dmabuf_obj_get(dmabuf_obj);

	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    " file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}

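/*
 * Called on vGPU teardown: orphan every remaining dmabuf_obj (clear its
 * vgpu back-pointer) and drop the initial reference taken by query_plane.
 */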
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		intel_gvt_hypervisor_put_vfio_device(vgpu);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}