vmwgfx_context.c

/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <drm/ttm/ttm_placement.h>

/**
 * struct vmw_user_context - User-space visible context resource.
 *
 * @base: TTM base object providing the user-space handle.
 * @res: The embedded context resource.
 * @cbs: State tracker for the context's current device bindings.
 */
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state cbs;
};

typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };

/**
 * Context management:
 */

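/**
 * vmw_hw_context_destroy - Destroy a hardware context.
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed contexts, kills all tracked bindings and destroys the
 * context through vmw_gb_context_destroy(). For legacy contexts, emits an
 * SVGA_3D_CMD_CONTEXT_DESTROY command through the FIFO.
 */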
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		mutex_lock(&dev_priv->binding_mutex);
		(void) vmw_context_binding_state_kill
			(&container_of(res, struct vmw_user_context, res)->cbs);
		(void) vmw_gb_context_destroy(res);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->binding_mutex);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}

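/**
 * vmw_gb_context_init - Initialize a guest-backed context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor called on failure, or NULL to use kfree().
 */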
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       struct vmw_resource *res,
			       void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_context_func);
	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;

	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
	INIT_LIST_HEAD(&uctx->cbs.list);

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

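/**
 * vmw_context_init - Initialize a context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor called on failure, or NULL to use kfree().
 *
 * On devices with guest-backed object support this dispatches to
 * vmw_gb_context_init(). Otherwise a legacy context id is allocated and
 * an SVGA_3D_CMD_CONTEXT_DEFINE command is emitted.
 */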
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

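/**
 * vmw_context_alloc - Allocate and initialize an in-kernel context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns the new context resource, or NULL on failure.
 */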
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);

	return (ret == 0) ? res : NULL;
}

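/**
 * vmw_gb_context_create - Create a guest-backed context on the device.
 *
 * @res: The context resource.
 *
 * Allocates a device context id and emits an SVGA_3D_CMD_DEFINE_GB_CONTEXT
 * command. Does nothing if the context already has an id.
 */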
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

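/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup MOB.
 *
 * @res: The context resource.
 * @val_buf: Validation buffer holding the backup buffer object.
 */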
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

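/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup MOB.
 *
 * @res: The context resource.
 * @readback: Whether to issue a readback command before unbinding.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Scrubs all bindings, optionally reads the context state back into the
 * backup buffer, unbinds the MOB and fences the backup buffer.
 */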
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_state_scrub(&uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

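/**
 * vmw_gb_context_destroy - Destroy a guest-backed context on the device.
 *
 * @res: The context resource.
 *
 * Emits an SVGA_3D_CMD_DESTROY_GB_CONTEXT command and releases the
 * context id. Does nothing if the context has no id.
 */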
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * User-space context management:
 */

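/**
 * vmw_user_context_base_to_res - Return the context resource embedded in a
 * user context's TTM base object.
 *
 * @base: Pointer to the TTM base object.
 */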
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

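/**
 * vmw_user_context_free - Free a user context resource.
 *
 * @res: The context resource.
 *
 * Frees the enclosing struct vmw_user_context and returns its accounted
 * size to the TTM memory global.
 */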
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

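/**
 * vmw_context_destroy_ioctl - Ioctl entry point for destroying a user-space
 * context handle.
 *
 * @dev: Pointer to the DRM device.
 * @data: Pointer to a struct drm_vmw_context_arg identifying the context.
 * @file_priv: Pointer to the DRM file private.
 *
 * Drops the TTM base-object reference held on behalf of user space.
 */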
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

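/**
 * vmw_context_define_ioctl - Ioctl entry point for creating a new context.
 *
 * @dev: Pointer to the DRM device.
 * @data: Pointer to a struct drm_vmw_context_arg. On success the new
 * context handle is returned in the cid member.
 * @file_priv: Pointer to the DRM file private.
 */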
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by maximum number of contexts anyway.
	 */
	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

/**
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.shader_type;
	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for render target "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.rt_type;
	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	cmd->body.target.face = 0;
	cmd->body.target.mipmap = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
				     bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		struct {
			SVGA3dCmdSetTextureState c;
			SVGA3dTextureState s1;
		} body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for texture "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.c.cid = bi->ctx->id;
	cmd->body.s1.stage = bi->i1.texture_stage;
	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_binding_drop: Stop tracking a context binding
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
{
	list_del(&cb->ctx_list);
	if (!list_empty(&cb->res_list))
		list_del(&cb->res_list);
	cb->bi.ctx = NULL;
}

/**
 * vmw_context_binding_add: Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Performs basic checks on the binding to make sure arguments are within
 * bounds and then starts tracking the binding in the context binding
 * state structure @cbs.
 */
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
			    const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
			DRM_ERROR("Illegal render target type %u.\n",
				  (unsigned) bi->i1.rt_type);
			return -EINVAL;
		}
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		if (unlikely((unsigned)bi->i1.texture_stage >=
			     SVGA3D_NUM_TEXTURE_UNITS)) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) bi->i1.texture_stage);
			return -EINVAL;
		}
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		if (unlikely((unsigned)bi->i1.shader_type >=
			     SVGA3D_SHADERTYPE_MAX)) {
			DRM_ERROR("Illegal shader type %u.\n",
				  (unsigned) bi->i1.shader_type);
			return -EINVAL;
		}
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	loc->bi = *bi;
	loc->bi.scrubbed = false;
	list_add_tail(&loc->ctx_list, &cbs->list);
	INIT_LIST_HEAD(&loc->res_list);

	return 0;
}

/**
 * vmw_context_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @bi: Information about the binding to track.
 *
 */
static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
					 const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	if (bi->res != NULL) {
		loc->bi = *bi;
		list_add_tail(&loc->ctx_list, &cbs->list);
		list_add_tail(&loc->res_list, &bi->res->binding_head);
	}
}

/**
 * vmw_context_binding_kill - Kill a binding on the device
 * and stop tracking it.
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Emits FIFO commands to scrub a binding represented by @cb.
 * Then stops tracking the binding and re-initializes its storage.
 */
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
	if (!cb->bi.scrubbed) {
		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
		cb->bi.scrubbed = true;
	}
	vmw_context_binding_drop(cb);
}

/**
 * vmw_context_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker.
 */
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_context_binding_res_list_kill(struct list_head *head)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, head, res_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Scrub all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, head, res_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
 * @ctx: Pointer to context to commit the staged binding info to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure to the persistent
 * structure in the context. This can be done once commands have been
 * submitted to hardware.
 */
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
					struct vmw_ctx_binding_state *from)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}

/**
 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @ctx: The context resource
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resources.
 */
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
	struct vmw_ctx_binding *entry;
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
	int ret;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (likely(!entry->bi.scrubbed))
			continue;

		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
			    SVGA3D_INVALID_ID))
			continue;

		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
		if (unlikely(ret != 0))
			return ret;

		entry->bi.scrubbed = false;
	}

	return 0;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}