vmwgfx_context.c

/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state cbs;
};

typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };

/**
 * Context management:
 */
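
/**
 * vmw_hw_context_destroy - Destroy a hardware context.
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed contexts, kills all tracked bindings and issues a
 * guest-backed destroy under the command-buffer and binding mutexes.
 * For legacy contexts, emits an SVGA_3D_CMD_CONTEXT_DESTROY command.
 */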
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		mutex_lock(&dev_priv->binding_mutex);
		(void) vmw_context_binding_state_kill
			(&container_of(res, struct vmw_user_context, res)->cbs);
		(void) vmw_gb_context_destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}
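
/**
 * vmw_gb_context_init - Initialize a guest-backed context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on failure, or NULL to kfree @res.
 *
 * Sets up @res as a guest-backed context, initializes its binding state
 * tracker and registers the hardware destroy callback.
 */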
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       struct vmw_resource *res,
			       void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_context_func);
	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;

	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
	INIT_LIST_HEAD(&uctx->cbs.list);

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}
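
/**
 * vmw_context_init - Initialize a context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on failure, or NULL to kfree @res.
 *
 * Dispatches to vmw_gb_context_init() on devices with MOB support;
 * otherwise allocates a legacy context id and emits the
 * SVGA_3D_CMD_CONTEXT_DEFINE command.
 */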
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}
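
/**
 * vmw_context_alloc - Allocate and initialize a kernel-side context.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns a pointer to the new context resource, or NULL on failure.
 */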
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);

	return (ret == 0) ? res : NULL;
}
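
/**
 * vmw_gb_context_create - Create a guest-backed context on the device.
 *
 * @res: The context resource.
 *
 * Allocates a context id and emits the SVGA_3D_CMD_DEFINE_GB_CONTEXT
 * command. A no-op if the context already has a device id.
 */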
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
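
/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup MOB.
 *
 * @res: The context resource.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Emits an SVGA_3D_CMD_BIND_GB_CONTEXT command pointing the context at
 * the backing MOB, indicating whether the backup contents are valid.
 */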
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
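
/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup MOB.
 *
 * @res: The context resource.
 * @readback: Whether to emit a readback command before unbinding.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Scrubs all context bindings, optionally reads the context state back
 * into the backup buffer, unbinds the MOB and fences the backup buffer.
 */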
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_state_scrub(&uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
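
/**
 * vmw_gb_context_destroy - Destroy a guest-backed context on the device.
 *
 * @res: The context resource.
 *
 * Emits an SVGA_3D_CMD_DESTROY_GB_CONTEXT command, invalidates the
 * cached query context id if it matches, and releases the context id.
 * A no-op if the context has no device id.
 */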
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}
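
/**
 * vmw_user_context_free - Resource destructor for user-space contexts.
 *
 * @res: The context resource embedded in a struct vmw_user_context.
 *
 * Frees the user context and returns its accounted size to the global
 * graphics memory accounting.
 */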
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
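
/**
 * vmw_context_destroy_ioctl - Ioctl dropping a user-space context reference.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_context_arg identifying the context.
 * @file_priv: The calling file private.
 */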
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}
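
/**
 * vmw_context_define_ioctl - Ioctl creating a new user-space context.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_context_arg; the new context id is
 * returned in its cid member.
 * @file_priv: The calling file private.
 */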
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.shader_type;
	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for render target "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.rt_type;
	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	cmd->body.target.face = 0;
	cmd->body.target.mipmap = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
				     bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		struct {
			SVGA3dCmdSetTextureState c;
			SVGA3dTextureState s1;
		} body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for texture "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.c.cid = bi->ctx->id;
	cmd->body.s1.stage = bi->i1.texture_stage;
	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_binding_drop: Stop tracking a context binding
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
{
	list_del(&cb->ctx_list);
	if (!list_empty(&cb->res_list))
		list_del(&cb->res_list);
	cb->bi.ctx = NULL;
}

/**
 * vmw_context_binding_add: Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Performs basic checks on the binding to make sure arguments are within
 * bounds and then starts tracking the binding in the context binding
 * state structure @cbs.
 */
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
			    const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
			DRM_ERROR("Illegal render target type %u.\n",
				  (unsigned) bi->i1.rt_type);
			return -EINVAL;
		}
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		if (unlikely((unsigned)bi->i1.texture_stage >=
			     SVGA3D_NUM_TEXTURE_UNITS)) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) bi->i1.texture_stage);
			return -EINVAL;
		}
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		if (unlikely((unsigned)bi->i1.shader_type >=
			     SVGA3D_SHADERTYPE_MAX)) {
			DRM_ERROR("Illegal shader type %u.\n",
				  (unsigned) bi->i1.shader_type);
			return -EINVAL;
		}
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	loc->bi = *bi;
	loc->bi.scrubbed = false;
	list_add_tail(&loc->ctx_list, &cbs->list);
	INIT_LIST_HEAD(&loc->res_list);

	return 0;
}

/**
 * vmw_context_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @bi: Information about the binding to track.
 */
static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
					 const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	if (bi->res != NULL) {
		loc->bi = *bi;
		list_add_tail(&loc->ctx_list, &cbs->list);
		list_add_tail(&loc->res_list, &bi->res->binding_head);
	}
}

/**
 * vmw_context_binding_kill - Kill a binding on the device
 * and stop tracking it.
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Emits FIFO commands to scrub a binding represented by @cb.
 * Then stops tracking the binding and re-initializes its storage.
 */
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
	if (!cb->bi.scrubbed) {
		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
		cb->bi.scrubbed = true;
	}
	vmw_context_binding_drop(cb);
}

/**
 * vmw_context_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker.
 */
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_context_binding_res_list_kill(struct list_head *head)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, head, res_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Scrub all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, head, res_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
 * @ctx: Pointer to context to commit the staged binding info to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure to the persistent
 * structure in the context. This can be done once commands have been
 * submitted to hardware.
 */
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
					struct vmw_ctx_binding_state *from)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}

/**
 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @ctx: The context resource
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resources.
 */
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
	struct vmw_ctx_binding *entry;
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
	int ret;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (likely(!entry->bi.scrubbed))
			continue;

		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
			    SVGA3D_INVALID_ID))
			continue;

		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
		if (unlikely(ret != 0))
			return ret;

		entry->bi.scrubbed = false;
	}

	return 0;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}