/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "gvt.h"

#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
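
/*
 * vgpu_gem_get_pages() builds the backing sg_table for a proxy GEM object.
 * The guest framebuffer already lives in graphics memory, so instead of
 * allocating pages this walks the GGTT entries that map the framebuffer
 * (starting at fb_info->start) and copies each decoded DMA address into
 * the scatterlist, one PAGE_SIZE entry per GTT page.
 */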
static int vgpu_gem_get_pages(
		struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct sg_table *st;
        struct scatterlist *sg;
        int i, ret;
        gen8_pte_t __iomem *gtt_entries;
        struct intel_vgpu_fb_info *fb_info;

        fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
        if (WARN_ON(!fb_info))
                return -ENODEV;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (unlikely(!st))
                return -ENOMEM;

        ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
        if (ret) {
                kfree(st);
                return ret;
        }
        gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
                (fb_info->start >> PAGE_SHIFT);
        for_each_sg(st->sgl, sg, fb_info->size, i) {
                sg->offset = 0;
                sg->length = PAGE_SIZE;
                sg_dma_address(sg) =
                        GEN8_DECODE_PTE(readq(&gtt_entries[i]));
                sg_dma_len(sg) = PAGE_SIZE;
        }

        __i915_gem_object_set_pages(obj, st, PAGE_SIZE);

        return 0;
}
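
/*
 * The proxy object never owns real pages; tearing it down only requires
 * freeing the scatter-gather table built in vgpu_gem_get_pages().
 */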
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
        sg_free_table(pages);
        kfree(pages);
}
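
/*
 * Final kref release for an intel_vgpu_dmabuf_obj.  If the vGPU is still
 * active, the object is looked up on the vGPU's dmabuf list so its IDR
 * entry and the VFIO device reference can be dropped along with it;
 * otherwise only the orphaned object itself is freed.
 */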
static void dmabuf_gem_object_free(struct kref *kref)
{
        struct intel_vgpu_dmabuf_obj *obj =
                container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
        struct intel_vgpu *vgpu = obj->vgpu;
        struct list_head *pos;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;

        if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
                list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                        dmabuf_obj = container_of(pos,
                                        struct intel_vgpu_dmabuf_obj, list);
                        if (dmabuf_obj == obj) {
                                intel_gvt_hypervisor_put_vfio_device(vgpu);
                                idr_remove(&vgpu->object_idr,
                                           dmabuf_obj->dmabuf_id);
                                kfree(dmabuf_obj->info);
                                kfree(dmabuf_obj);
                                list_del(pos);
                                break;
                        }
                }
        } else {
                /* Free the orphan dmabuf_objs here */
                kfree(obj->info);
                kfree(obj);
        }
}
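
/* Reference helpers for the per-plane dmabuf bookkeeping object. */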
static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
        kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
        kref_put(&obj->kref, dmabuf_gem_object_free);
}
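
/*
 * GEM ->release hook: when the exported GEM object goes away, drop the
 * reference it held on the dmabuf_obj.  The vGPU's dmabuf_lock is taken
 * only if the vGPU still exists; if it has already been removed, the
 * dmabuf_obj is an orphan and can be put directly.
 */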
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
        struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
        struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
        struct intel_vgpu *vgpu = obj->vgpu;

        if (vgpu) {
                mutex_lock(&vgpu->dmabuf_lock);
                gem_obj->base.dma_buf = NULL;
                dmabuf_obj_put(obj);
                mutex_unlock(&vgpu->dmabuf_lock);
        } else {
                /* vgpu is NULL, as it has been removed already */
                gem_obj->base.dma_buf = NULL;
                dmabuf_obj_put(obj);
        }
}

static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
        .flags = I915_GEM_OBJECT_IS_PROXY,
        .get_pages = vgpu_gem_get_pages,
        .put_pages = vgpu_gem_put_pages,
        .release = vgpu_gem_release,
};
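
/*
 * Wrap a decoded guest plane in a proxy GEM object.  The object has no
 * backing storage of its own: its pages are resolved lazily from the
 * GGTT by vgpu_gem_get_pages().  Tiling and stride are derived from the
 * guest PLANE_CTL tiling bits on Skylake/Kabylake; on other platforms
 * any non-linear modifier is treated as X-tiled.
 */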
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_alloc(dev_priv);
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(dev, &obj->base,
                info->size << PAGE_SHIFT);
        i915_gem_object_init(obj, &intel_vgpu_gem_ops);

        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;
        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;

                switch (info->drm_format_mod << 10) {
                case PLANE_CTL_TILED_LINEAR:
                        tiling_mode = I915_TILING_NONE;
                        break;
                case PLANE_CTL_TILED_X:
                        tiling_mode = I915_TILING_X;
                        stride = info->stride;
                        break;
                case PLANE_CTL_TILED_Y:
                        tiling_mode = I915_TILING_Y;
                        stride = info->stride;
                        break;
                default:
                        gvt_dbg_core("not supported tiling mode\n");
                }
                obj->tiling_and_stride = tiling_mode | stride;
        } else {
                obj->tiling_and_stride = info->drm_format_mod ?
                                        I915_TILING_X : 0;
        }

        return obj;
}
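
/*
 * Decode the guest's current primary or cursor plane into fb_info and
 * sanity-check the result: the framebuffer must have a non-zero size,
 * start on a page boundary, and fall entirely within the vGPU's valid
 * GGTT range.  info->size is stored in pages, rounded up.
 */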
static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_vgpu_primary_plane_format p;
        struct intel_vgpu_cursor_plane_format c;
        int ret;

        if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
                ret = intel_vgpu_decode_primary_plane(vgpu, &p);
                if (ret)
                        return ret;
                info->start = p.base;
                info->start_gpa = p.base_gpa;
                info->width = p.width;
                info->height = p.height;
                info->stride = p.stride;
                info->drm_format = p.drm_format;
                info->drm_format_mod = p.tiled;
                info->size = (((p.stride * p.height * p.bpp) / 8) +
                                (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                if (ret)
                        return ret;
                info->start = c.base;
                info->start_gpa = c.base_gpa;
                info->width = c.width;
                info->height = c.height;
                info->stride = c.width * (c.bpp / 8);
                info->drm_format = c.drm_format;
                info->drm_format_mod = 0;
                info->x_pos = c.x_pos;
                info->y_pos = c.y_pos;

                /* The invalid cursor hotspot value is delivered to host
                 * until we find a way to get the cursor hotspot info of
                 * guest OS.
                 */
                info->x_hot = UINT_MAX;
                info->y_hot = UINT_MAX;
                info->size = (((info->stride * c.height * c.bpp) / 8)
                                + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else {
                gvt_vgpu_err("invalid plane id:%d\n", plane_id);
                return -EINVAL;
        }

        if (info->size == 0) {
                gvt_vgpu_err("fb size is zero\n");
                return -EINVAL;
        }
        if (info->start & (PAGE_SIZE - 1)) {
                gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
                return -EFAULT;
        }
        if (((info->start >> PAGE_SHIFT) + info->size) >
                ggtt_total_entries(&dev_priv->ggtt)) {
                gvt_vgpu_err("Invalid GTT offset or size\n");
                return -EFAULT;
        }

        if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
                gvt_vgpu_err("invalid gma addr\n");
                return -EFAULT;
        }

        return 0;
}
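
/*
 * Look for an already-exposed dmabuf_obj whose framebuffer description
 * matches the plane that was just decoded, so the same dmabuf can be
 * handed out again instead of creating a new one.
 */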
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		    struct intel_vgpu_fb_info *latest_info)
{
        struct list_head *pos;
        struct intel_vgpu_fb_info *fb_info;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
        struct intel_vgpu_dmabuf_obj *ret = NULL;

        list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
                                                list);
                if ((dmabuf_obj == NULL) ||
                    (dmabuf_obj->info == NULL))
                        continue;

                fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
                if ((fb_info->start == latest_info->start) &&
                    (fb_info->start_gpa == latest_info->start_gpa) &&
                    (fb_info->size == latest_info->size) &&
                    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
                    (fb_info->drm_format == latest_info->drm_format) &&
                    (fb_info->width == latest_info->width) &&
                    (fb_info->height == latest_info->height)) {
                        ret = dmabuf_obj;
                        break;
                }
        }

        return ret;
}
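
/* Look up an exposed dmabuf_obj by the id handed out by query_plane. */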
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
        struct list_head *pos;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
        struct intel_vgpu_dmabuf_obj *ret = NULL;

        list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
                                                list);
                if (!dmabuf_obj)
                        continue;

                if (dmabuf_obj->dmabuf_id == id) {
                        ret = dmabuf_obj;
                        break;
                }
        }

        return ret;
}
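
/* Copy the decoded framebuffer description into the VFIO plane info
 * structure reported back to userspace.
 */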
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
		      struct intel_vgpu_fb_info *fb_info)
{
        gvt_dmabuf->drm_format = fb_info->drm_format;
        gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
        gvt_dmabuf->width = fb_info->width;
        gvt_dmabuf->height = fb_info->height;
        gvt_dmabuf->stride = fb_info->stride;
        gvt_dmabuf->size = fb_info->size;
        gvt_dmabuf->x_pos = fb_info->x_pos;
        gvt_dmabuf->y_pos = fb_info->y_pos;
        gvt_dmabuf->x_hot = fb_info->x_hot;
        gvt_dmabuf->y_hot = fb_info->y_hot;
}
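
/*
 * intel_vgpu_query_plane() - describe the vGPU's current plane to userspace
 *
 * @args points at a struct vfio_device_gfx_plane_info.  A probe request
 * (DMABUF | PROBE flags) only confirms that dmabuf planes are supported.
 * Otherwise the requested plane is decoded and, if an identical
 * framebuffer has already been exposed, its existing dmabuf id is
 * returned; a new dmabuf_obj is created and added to the vGPU's list
 * only when no match is found.  An extra "initref" reference keeps the
 * object alive between this query and the following get_dmabuf call.
 * -ENODEV from plane decoding (plane not ready) is treated as success.
 */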
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
        struct vfio_device_gfx_plane_info *gfx_plane_info = args;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;
        struct intel_vgpu_fb_info fb_info;
        int ret = 0;

        if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
                                       VFIO_GFX_PLANE_TYPE_PROBE))
                return ret;
        else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
                        (!gfx_plane_info->flags))
                return -EINVAL;

        ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
                                   gfx_plane_info->drm_plane_type);
        if (ret != 0)
                goto out;

        mutex_lock(&vgpu->dmabuf_lock);
        /* If exists, pick up the exposed dmabuf_obj */
        dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
        if (dmabuf_obj) {
                update_fb_info(gfx_plane_info, &fb_info);
                gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

                /* This buffer may be released between query_plane ioctl and
                 * get_dmabuf ioctl. Add the refcount to make sure it won't
                 * be released between the two ioctls.
                 */
                if (!dmabuf_obj->initref) {
                        dmabuf_obj->initref = true;
                        dmabuf_obj_get(dmabuf_obj);
                }
                ret = 0;
                gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
                            vgpu->id, kref_read(&dmabuf_obj->kref),
                            gfx_plane_info->dmabuf_id);
                mutex_unlock(&vgpu->dmabuf_lock);
                goto out;
        }

        mutex_unlock(&vgpu->dmabuf_lock);

        /* Need to allocate a new one */
        dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
        if (unlikely(!dmabuf_obj)) {
                gvt_vgpu_err("alloc dmabuf_obj failed\n");
                ret = -ENOMEM;
                goto out;
        }

        dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
                                   GFP_KERNEL);
        if (unlikely(!dmabuf_obj->info)) {
                gvt_vgpu_err("allocate intel vgpu fb info failed\n");
                ret = -ENOMEM;
                goto out_free_dmabuf;
        }
        memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

        ((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

        dmabuf_obj->vgpu = vgpu;

        ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
        if (ret < 0)
                goto out_free_info;
        gfx_plane_info->dmabuf_id = ret;
        dmabuf_obj->dmabuf_id = ret;

        dmabuf_obj->initref = true;

        kref_init(&dmabuf_obj->kref);

        mutex_lock(&vgpu->dmabuf_lock);
        if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
                gvt_vgpu_err("get vfio device failed\n");
                mutex_unlock(&vgpu->dmabuf_lock);
                goto out_free_info;
        }
        mutex_unlock(&vgpu->dmabuf_lock);

        update_fb_info(gfx_plane_info, &fb_info);

        INIT_LIST_HEAD(&dmabuf_obj->list);
        mutex_lock(&vgpu->dmabuf_lock);
        list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
        mutex_unlock(&vgpu->dmabuf_lock);

        gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
                    __func__, kref_read(&dmabuf_obj->kref), ret);

        return 0;

out_free_info:
        kfree(dmabuf_obj->info);
out_free_dmabuf:
        kfree(dmabuf_obj);
out:
        /* ENODEV means plane isn't ready, which might be a normal case. */
        return (ret == -ENODEV) ? 0 : ret;
}

/* To associate an exposed dmabuf with the dmabuf_obj: create a proxy GEM
 * object for the dmabuf_obj selected by dmabuf_id, export it as a dma-buf
 * and return a file descriptor for it.
 */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;
        struct drm_i915_gem_object *obj;
        struct dma_buf *dmabuf;
        int dmabuf_fd;
        int ret = 0;

        mutex_lock(&vgpu->dmabuf_lock);

        dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
        if (dmabuf_obj == NULL) {
                gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
                ret = -EINVAL;
                goto out;
        }

        obj = vgpu_create_gem(dev, dmabuf_obj->info);
        if (obj == NULL) {
                gvt_vgpu_err("create gvt gem obj failed\n");
                ret = -ENOMEM;
                goto out;
        }

        obj->gvt_info = dmabuf_obj->info;

        dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
        if (IS_ERR(dmabuf)) {
                gvt_vgpu_err("export dma-buf failed\n");
                ret = PTR_ERR(dmabuf);
                goto out_free_gem;
        }

        i915_gem_object_put(obj);

        ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
        if (ret < 0) {
                gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
                goto out_free_dmabuf;
        }
        dmabuf_fd = ret;

        dmabuf_obj_get(dmabuf_obj);

        if (dmabuf_obj->initref) {
                dmabuf_obj->initref = false;
                dmabuf_obj_put(dmabuf_obj);
        }

        mutex_unlock(&vgpu->dmabuf_lock);

        gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
                    " file count: %ld, GEM ref: %d\n",
                    vgpu->id, dmabuf_obj->dmabuf_id,
                    kref_read(&dmabuf_obj->kref),
                    dmabuf_fd,
                    file_count(dmabuf->file),
                    kref_read(&obj->base.refcount));

        return dmabuf_fd;

out_free_dmabuf:
        dma_buf_put(dmabuf);
out_free_gem:
        i915_gem_object_put(obj);
out:
        mutex_unlock(&vgpu->dmabuf_lock);
        return ret;
}
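
/*
 * Detach every dmabuf_obj from a vGPU that is going away: drop the IDR
 * entry and VFIO device reference, remove it from the list, and release
 * the initial query_plane reference.  Objects still pinned by an exported
 * dma-buf survive as orphans and are freed by dmabuf_gem_object_free()
 * when their last reference is dropped.
 */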
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
        struct list_head *pos, *n;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;

        mutex_lock(&vgpu->dmabuf_lock);
        list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
                                          list);
                dmabuf_obj->vgpu = NULL;

                idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
                intel_gvt_hypervisor_put_vfio_device(vgpu);
                list_del(pos);

                /* dmabuf_obj might be freed in dmabuf_obj_put */
                if (dmabuf_obj->initref) {
                        dmabuf_obj->initref = false;
                        dmabuf_obj_put(dmabuf_obj);
                }
        }
        mutex_unlock(&vgpu->dmabuf_lock);
}