/* vmwgfx_shader.c - VMware vmwgfx guest-backed shader management */
/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

/* log2 of the compat shader hash table size. */
#define VMW_COMPAT_SHADER_HT_ORDER 12
/**
 * struct vmw_shader - Resource-private part of a guest-backed shader.
 *
 * @res: The embedded struct vmw_resource.
 * @type: The SVGA3d shader type.
 * @size: Size of the shader byte code in bytes.
 */
struct vmw_shader {
	struct vmw_resource res;
	SVGA3dShaderType type;
	uint32_t size;
};
/**
 * struct vmw_user_shader - User-space visible shader object.
 *
 * @base: TTM base object, making the shader reachable by a user-space handle.
 * @shader: The embedded struct vmw_shader.
 */
struct vmw_user_shader {
	struct ttm_base_object base;
	struct vmw_shader shader;
};
/**
 * enum vmw_compat_shader_state - Staging state for compat shaders
 *
 * @VMW_COMPAT_COMMITED: Committed to the manager's hash table and list.
 * @VMW_COMPAT_ADD: Staged for addition; in the hash table and on a
 * caller's staging list.
 * @VMW_COMPAT_DEL: Staged for removal; removed from the hash table and
 * on a caller's staging list.
 */
enum vmw_compat_shader_state {
	VMW_COMPAT_COMMITED,
	VMW_COMPAT_ADD,
	VMW_COMPAT_DEL
};
/**
 * struct vmw_compat_shader - Metadata for compat shaders.
 *
 * @handle: The TTM handle of the guest backed shader.
 * @tfile: The struct ttm_object_file the guest backed shader is registered
 * with.
 * @hash: Hash item for lookup.
 * @head: List head for staging lists or the compat shader manager list.
 * @state: Staging state.
 *
 * The structure is protected by the cmdbuf lock.
 */
struct vmw_compat_shader {
	u32 handle;
	struct ttm_object_file *tfile;
	struct drm_hash_item hash;
	struct list_head head;
	enum vmw_compat_shader_state state;
};
/**
 * struct vmw_compat_shader_manager - Compat shader manager.
 *
 * @shaders: Hash table containing staged and committed compat shaders
 * @list: List of committed shaders.
 * @dev_priv: Pointer to a device private structure.
 *
 * @shaders and @list are protected by the cmdbuf mutex for now.
 */
struct vmw_compat_shader_manager {
	struct drm_open_hash shaders;
	struct list_head list;
	struct vmw_private *dev_priv;
};
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);

static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

/* Accounted size of a user shader; computed lazily on first allocation. */
static uint64_t vmw_user_shader_size;

/* Conversion from TTM base objects to shader resources. */
static const struct vmw_user_resource_conv user_shader_conv = {
	.object_type = VMW_RES_SHADER,
	.base_obj_to_res = vmw_user_shader_base_to_res,
	.res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
	&user_shader_conv;

/* Resource operations for guest-backed shaders. */
static const struct vmw_res_func vmw_gb_shader_func = {
	.res_type = vmw_res_shader,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed shaders",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_shader_create,
	.destroy = vmw_gb_shader_destroy,
	.bind = vmw_gb_shader_bind,
	.unbind = vmw_gb_shader_unbind
};
/**
 * Shader management:
 */

/* Convert a struct vmw_resource to the enclosing struct vmw_shader. */
static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
	return container_of(res, struct vmw_shader, res);
}
/* Hardware destroy callback; the destroy result is intentionally ignored. */
static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
	(void) vmw_gb_shader_destroy(res);
}
/**
 * vmw_gb_shader_init - Initialize a guest-backed shader resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The shader resource to initialize.
 * @size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within @byte_code.
 * @type: SVGA3d shader type.
 * @byte_code: Optional dma buffer holding the byte code; if non-NULL it is
 * referenced as the resource's backup buffer.
 * @res_free: Destructor for @res, or NULL to use kfree().
 *
 * On failure, @res is destroyed using @res_free (or kfree() when
 * @res_free is NULL) and the error is returned.
 */
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
			      struct vmw_resource *res,
			      uint32_t size,
			      uint64_t offset,
			      SVGA3dShaderType type,
			      struct vmw_dma_buffer *byte_code,
			      void (*res_free) (struct vmw_resource *res))
{
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_shader_func);

	if (unlikely(ret != 0)) {
		/* Resource init failed; dispose of the embedding object. */
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	res->backup_size = size;
	if (byte_code) {
		res->backup = vmw_dmabuf_reference(byte_code);
		res->backup_offset = offset;
	}
	shader->size = size;
	shader->type = type;

	vmw_resource_activate(res, vmw_hw_shader_destroy);
	return 0;
}
/**
 * vmw_gb_shader_create - Create the shader on the device.
 *
 * @res: The shader resource.
 *
 * Allocates a device id and emits an SVGA_3D_CMD_DEFINE_GB_SHADER
 * command to the device FIFO. A no-op if the shader already has an id.
 */
static int vmw_gb_shader_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBShader body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a shader id.\n");
		goto out_no_id;
	}

	/* The device supports only a limited range of shader ids. */
	if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.type = shader->type;
	cmd->body.sizeInBytes = shader->size;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	/* Account for an active 3d resource; result intentionally ignored. */
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
/**
 * vmw_gb_shader_bind - Bind the shader to its backup MOB.
 *
 * @res: The shader resource.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 */
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	/* The backup buffer must have been validated into MOB memory. */
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.offsetInBytes = 0;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
/**
 * vmw_gb_shader_unbind - Unbind the shader from its backup MOB.
 *
 * @res: The shader resource.
 * @readback: Ignored; shaders are not read back on unbind.
 * @val_buf: Validation buffer holding the backup buffer object to fence.
 */
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct vmw_fence_obj *fence;

	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	/* Binding to the invalid mob id detaches the backup buffer. */
	cmd->body.mobid = SVGA3D_INVALID_ID;
	cmd->body.offsetInBytes = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/*
	 * Create a fence object and fence the backup buffer.
	 */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
/**
 * vmw_gb_shader_destroy - Destroy the shader on the device.
 *
 * @res: The shader resource.
 *
 * Scrubs any context bindings referencing the shader, emits an
 * SVGA_3D_CMD_DESTROY_GB_SHADER command and releases the device id.
 * A no-op if the shader was never created on the device.
 */
static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBShader body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	/* Bindings must be scrubbed before the shader is destroyed. */
	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_res_list_scrub(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}
  276. /**
  277. * User-space shader management:
  278. */
  279. static struct vmw_resource *
  280. vmw_user_shader_base_to_res(struct ttm_base_object *base)
  281. {
  282. return &(container_of(base, struct vmw_user_shader, base)->
  283. shader.res);
  284. }
/* Resource destructor; frees the object and returns the accounted memory. */
static void vmw_user_shader_free(struct vmw_resource *res)
{
	struct vmw_user_shader *ushader =
		container_of(res, struct vmw_user_shader, shader.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ushader, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_shader_size);
}
  294. /**
  295. * This function is called when user space has no more references on the
  296. * base object. It releases the base-object's reference on the resource object.
  297. */
  298. static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
  299. {
  300. struct ttm_base_object *base = *p_base;
  301. struct vmw_resource *res = vmw_user_shader_base_to_res(base);
  302. *p_base = NULL;
  303. vmw_resource_unreference(&res);
  304. }
  305. int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
  306. struct drm_file *file_priv)
  307. {
  308. struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
  309. struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  310. return ttm_ref_object_base_unref(tfile, arg->handle,
  311. TTM_REF_USAGE);
  312. }
/**
 * vmw_shader_alloc - Allocate a user-space visible guest-backed shader.
 *
 * @dev_priv: Pointer to the device private structure.
 * @buffer: Optional dma buffer holding the shader byte code.
 * @shader_size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within @buffer.
 * @shader_type: SVGA3d shader type.
 * @tfile: struct ttm_object_file to register the base object with.
 * @handle: If non-NULL, receives the TTM handle of the new shader.
 */
static int vmw_shader_alloc(struct vmw_private *dev_priv,
			    struct vmw_dma_buffer *buffer,
			    size_t shader_size,
			    size_t offset,
			    SVGA3dShaderType shader_type,
			    struct ttm_object_file *tfile,
			    u32 *handle)
{
	struct vmw_user_shader *ushader;
	struct vmw_resource *res, *tmp;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by maximum number_of shaders anyway.
	 */
	if (unlikely(vmw_user_shader_size == 0))
		vmw_user_shader_size =
			ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_shader_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader "
				  "creation.\n");
		goto out;
	}

	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
	if (unlikely(ushader == NULL)) {
		/* Undo the accounting done above. */
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_shader_size);
		ret = -ENOMEM;
		goto out;
	}

	res = &ushader->shader.res;
	ushader->base.shareable = false;
	ushader->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
				 offset, shader_type, buffer,
				 vmw_user_shader_free);
	if (unlikely(ret != 0))
		goto out;

	/* Extra reference to be owned by the base object. */
	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &ushader->base, false,
				   VMW_RES_SHADER,
				   &vmw_user_shader_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	if (handle)
		*handle = ushader->base.hash.key;
out_err:
	/* Drop the caller's reference; the base object keeps its own. */
	vmw_resource_unreference(&res);
out:
	return ret;
}
/**
 * vmw_shader_define_ioctl - Ioctl to create a user-space shader.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_shader_create_arg.
 * @file_priv: The calling file.
 */
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_shader_create_arg *arg =
		(struct drm_vmw_shader_create_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *buffer = NULL;
	SVGA3dShaderType shader_type;
	int ret;

	if (arg->buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
					     &buffer);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find buffer for shader "
				  "creation.\n");
			return ret;
		}

		/*
		 * 64-bit math so the user-supplied size + offset cannot
		 * overflow the range check.
		 */
		if ((u64)buffer->base.num_pages * PAGE_SIZE <
		    (u64)arg->size + (u64)arg->offset) {
			DRM_ERROR("Illegal buffer- or shader size.\n");
			ret = -EINVAL;
			goto out_bad_arg;
		}
	}

	switch (arg->shader_type) {
	case drm_vmw_shader_type_vs:
		shader_type = SVGA3D_SHADERTYPE_VS;
		break;
	case drm_vmw_shader_type_ps:
		shader_type = SVGA3D_SHADERTYPE_PS;
		break;
	case drm_vmw_shader_type_gs:
		shader_type = SVGA3D_SHADERTYPE_GS;
		break;
	default:
		DRM_ERROR("Illegal shader type.\n");
		ret = -EINVAL;
		goto out_bad_arg;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_bad_arg;

	ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
			       shader_type, tfile, &arg->shader_handle);

	ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
	/* NULL-safe: buffer may never have been looked up. */
	vmw_dmabuf_unreference(&buffer);
	return ret;
}
  423. /**
  424. * vmw_compat_shader_lookup - Look up a compat shader
  425. *
  426. * @man: Pointer to the compat shader manager.
  427. * @shader_type: The shader type, that combined with the user_key identifies
  428. * the shader.
  429. * @user_key: On entry, this should be a pointer to the user_key.
  430. * On successful exit, it will contain the guest-backed shader's TTM handle.
  431. *
  432. * Returns 0 on success. Non-zero on failure, in which case the value pointed
  433. * to by @user_key is unmodified.
  434. */
  435. int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
  436. SVGA3dShaderType shader_type,
  437. u32 *user_key)
  438. {
  439. struct drm_hash_item *hash;
  440. int ret;
  441. unsigned long key = *user_key | (shader_type << 24);
  442. ret = drm_ht_find_item(&man->shaders, key, &hash);
  443. if (unlikely(ret != 0))
  444. return ret;
  445. *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
  446. hash)->handle;
  447. return 0;
  448. }
/**
 * vmw_compat_shader_free - Free a compat shader.
 *
 * @man: Pointer to the compat shader manager.
 * @entry: Pointer to a struct vmw_compat_shader.
 *
 * Frees a struct vmw_compat_shader entry and drops its reference to the
 * guest backed shader.
 */
static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
				   struct vmw_compat_shader *entry)
{
	list_del(&entry->head);
	WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
	WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
					  TTM_REF_USAGE));
	kfree(entry);
}
  467. /**
  468. * vmw_compat_shaders_commit - Commit a list of compat shader actions.
  469. *
  470. * @man: Pointer to the compat shader manager.
  471. * @list: Caller's list of compat shader actions.
  472. *
  473. * This function commits a list of compat shader additions or removals.
  474. * It is typically called when the execbuf ioctl call triggering these
  475. * actions has commited the fifo contents to the device.
  476. */
  477. void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
  478. struct list_head *list)
  479. {
  480. struct vmw_compat_shader *entry, *next;
  481. list_for_each_entry_safe(entry, next, list, head) {
  482. list_del(&entry->head);
  483. switch (entry->state) {
  484. case VMW_COMPAT_ADD:
  485. entry->state = VMW_COMPAT_COMMITED;
  486. list_add_tail(&entry->head, &man->list);
  487. break;
  488. case VMW_COMPAT_DEL:
  489. ttm_ref_object_base_unref(entry->tfile, entry->handle,
  490. TTM_REF_USAGE);
  491. kfree(entry);
  492. break;
  493. default:
  494. BUG();
  495. break;
  496. }
  497. }
  498. }
  499. /**
  500. * vmw_compat_shaders_revert - Revert a list of compat shader actions
  501. *
  502. * @man: Pointer to the compat shader manager.
  503. * @list: Caller's list of compat shader actions.
  504. *
  505. * This function reverts a list of compat shader additions or removals.
  506. * It is typically called when the execbuf ioctl call triggering these
  507. * actions failed for some reason, and the command stream was never
  508. * submitted.
  509. */
  510. void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
  511. struct list_head *list)
  512. {
  513. struct vmw_compat_shader *entry, *next;
  514. int ret;
  515. list_for_each_entry_safe(entry, next, list, head) {
  516. switch (entry->state) {
  517. case VMW_COMPAT_ADD:
  518. vmw_compat_shader_free(man, entry);
  519. break;
  520. case VMW_COMPAT_DEL:
  521. ret = drm_ht_insert_item(&man->shaders, &entry->hash);
  522. list_del(&entry->head);
  523. list_add_tail(&entry->head, &man->list);
  524. entry->state = VMW_COMPAT_COMMITED;
  525. break;
  526. default:
  527. BUG();
  528. break;
  529. }
  530. }
  531. }
/**
 * vmw_compat_shader_remove - Stage a compat shader for removal.
 *
 * @man: Pointer to the compat shader manager
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @shader_type: Shader type.
 * @list: Caller's list of staged shader actions.
 *
 * This function stages a compat shader for removal and removes the key from
 * the shader manager's hash table. If the shader was previously only staged
 * for addition it is completely removed (But the execbuf code may keep a
 * reference if it was bound to a context between addition and removal). If
 * it was previously committed to the manager, it is staged for removal.
 */
int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list)
{
	struct vmw_compat_shader *entry;
	struct drm_hash_item *hash;
	int ret;

	/* Keys encode the shader type in the top 8 bits. */
	ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
			       &hash);
	if (likely(ret != 0))
		return -EINVAL;

	entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);

	switch (entry->state) {
	case VMW_COMPAT_ADD:
		/* Never committed; remove it outright. */
		vmw_compat_shader_free(man, entry);
		break;
	case VMW_COMPAT_COMMITED:
		/* Unhash now, but defer destruction to commit time. */
		(void) drm_ht_remove_item(&man->shaders, &entry->hash);
		list_del(&entry->head);
		entry->state = VMW_COMPAT_DEL;
		list_add_tail(&entry->head, list);
		break;
	default:
		BUG();
		break;
	}

	return 0;
}
  575. /**
  576. * vmw_compat_shader_add - Create a compat shader and add the
  577. * key to the manager
  578. *
  579. * @man: Pointer to the compat shader manager
  580. * @user_key: The key that is used to identify the shader. The key is
  581. * unique to the shader type.
  582. * @bytecode: Pointer to the bytecode of the shader.
  583. * @shader_type: Shader type.
  584. * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
  585. * to be created with.
  586. * @list: Caller's list of staged shader actions.
  587. *
  588. * Note that only the key is added to the shader manager's hash table.
  589. * The shader is not yet added to the shader manager's list of shaders.
  590. */
  591. int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
  592. u32 user_key, const void *bytecode,
  593. SVGA3dShaderType shader_type,
  594. size_t size,
  595. struct ttm_object_file *tfile,
  596. struct list_head *list)
  597. {
  598. struct vmw_dma_buffer *buf;
  599. struct ttm_bo_kmap_obj map;
  600. bool is_iomem;
  601. struct vmw_compat_shader *compat;
  602. u32 handle;
  603. int ret;
  604. if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
  605. return -EINVAL;
  606. /* Allocate and pin a DMA buffer */
  607. buf = kzalloc(sizeof(*buf), GFP_KERNEL);
  608. if (unlikely(buf == NULL))
  609. return -ENOMEM;
  610. ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
  611. true, vmw_dmabuf_bo_free);
  612. if (unlikely(ret != 0))
  613. goto out;
  614. ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
  615. if (unlikely(ret != 0))
  616. goto no_reserve;
  617. /* Map and copy shader bytecode. */
  618. ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
  619. &map);
  620. if (unlikely(ret != 0)) {
  621. ttm_bo_unreserve(&buf->base);
  622. goto no_reserve;
  623. }
  624. memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
  625. WARN_ON(is_iomem);
  626. ttm_bo_kunmap(&map);
  627. ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
  628. WARN_ON(ret != 0);
  629. ttm_bo_unreserve(&buf->base);
  630. /* Create a guest-backed shader container backed by the dma buffer */
  631. ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
  632. tfile, &handle);
  633. vmw_dmabuf_unreference(&buf);
  634. if (unlikely(ret != 0))
  635. goto no_reserve;
  636. /*
  637. * Create a compat shader structure and stage it for insertion
  638. * in the manager
  639. */
  640. compat = kzalloc(sizeof(*compat), GFP_KERNEL);
  641. if (compat == NULL)
  642. goto no_compat;
  643. compat->hash.key = user_key | (shader_type << 24);
  644. ret = drm_ht_insert_item(&man->shaders, &compat->hash);
  645. if (unlikely(ret != 0))
  646. goto out_invalid_key;
  647. compat->state = VMW_COMPAT_ADD;
  648. compat->handle = handle;
  649. compat->tfile = tfile;
  650. list_add_tail(&compat->head, list);
  651. return 0;
  652. out_invalid_key:
  653. kfree(compat);
  654. no_compat:
  655. ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
  656. no_reserve:
  657. out:
  658. return ret;
  659. }
  660. /**
  661. * vmw_compat_shader_man_create - Create a compat shader manager
  662. *
  663. * @dev_priv: Pointer to a device private structure.
  664. *
  665. * Typically done at file open time. If successful returns a pointer to a
  666. * compat shader manager. Otherwise returns an error pointer.
  667. */
  668. struct vmw_compat_shader_manager *
  669. vmw_compat_shader_man_create(struct vmw_private *dev_priv)
  670. {
  671. struct vmw_compat_shader_manager *man;
  672. int ret;
  673. man = kzalloc(sizeof(*man), GFP_KERNEL);
  674. if (man == NULL)
  675. return ERR_PTR(-ENOMEM);
  676. man->dev_priv = dev_priv;
  677. INIT_LIST_HEAD(&man->list);
  678. ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
  679. if (ret == 0)
  680. return man;
  681. kfree(man);
  682. return ERR_PTR(ret);
  683. }
  684. /**
  685. * vmw_compat_shader_man_destroy - Destroy a compat shader manager
  686. *
  687. * @man: Pointer to the shader manager to destroy.
  688. *
  689. * Typically done at file close time.
  690. */
  691. void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
  692. {
  693. struct vmw_compat_shader *entry, *next;
  694. mutex_lock(&man->dev_priv->cmdbuf_mutex);
  695. list_for_each_entry_safe(entry, next, &man->list, head)
  696. vmw_compat_shader_free(man, entry);
  697. mutex_unlock(&man->dev_priv->cmdbuf_mutex);
  698. kfree(man);
  699. }