  1. /**************************************************************************
  2. *
  3. * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
  4. * All Rights Reserved.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sub license, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial portions
  16. * of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. **************************************************************************/
  27. #include "vmwgfx_drv.h"
  28. #include "vmwgfx_resource_priv.h"
  29. #include "ttm/ttm_placement.h"
  30. #define VMW_COMPAT_SHADER_HT_ORDER 12
/**
 * struct vmw_shader - Driver-side representation of a guest-backed shader.
 *
 * @res: Embedded base resource; the shader is tracked through the generic
 * vmw_resource machinery.
 * @type: SVGA3D shader type (vertex / pixel / geometry).
 * @size: Size of the shader bytecode in bytes.
 */
struct vmw_shader {
	struct vmw_resource res;
	SVGA3dShaderType type;
	uint32_t size;
};
/**
 * struct vmw_user_shader - User-space visible shader object.
 *
 * @base: TTM base object providing the user-space handle and refcounting.
 * @shader: The embedded shader resource.
 */
struct vmw_user_shader {
	struct ttm_base_object base;
	struct vmw_shader shader;
};
/**
 * enum vmw_compat_shader_state - Staging state for compat shaders
 *
 * @VMW_COMPAT_COMMITED: Committed to the manager's shader list.
 * (Identifier spelling kept as-is; it is used throughout this file.)
 * @VMW_COMPAT_ADD: Staged for addition; moved to the manager's list on commit.
 * @VMW_COMPAT_DEL: Staged for deletion; handle is unreferenced on commit.
 */
enum vmw_compat_shader_state {
	VMW_COMPAT_COMMITED,
	VMW_COMPAT_ADD,
	VMW_COMPAT_DEL
};
/**
 * struct vmw_compat_shader - Metadata for compat shaders.
 *
 * @handle: The TTM handle of the guest backed shader.
 * @tfile: The struct ttm_object_file the guest backed shader is registered
 * with.
 * @hash: Hash item for lookup.
 * @head: List head for staging lists or the compat shader manager list.
 * @state: Staging state.
 *
 * The structure is protected by the cmdbuf lock.
 */
struct vmw_compat_shader {
	u32 handle;
	struct ttm_object_file *tfile;
	struct drm_hash_item hash;
	struct list_head head;
	enum vmw_compat_shader_state state;
};
/**
 * struct vmw_compat_shader_manager - Compat shader manager.
 *
 * @shaders: Hash table containing staged and committed compat shaders
 * @list: List of committed shaders.
 * @dev_priv: Pointer to a device private structure.
 *
 * @shaders and @list are protected by the cmdbuf mutex for now.
 */
struct vmw_compat_shader_manager {
	struct drm_open_hash shaders;
	struct list_head list;
	struct vmw_private *dev_priv;
};
/* Forward declarations of the resource callbacks implemented below. */
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);
static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

/* Accounting size for a user shader; computed lazily in vmw_shader_alloc(). */
static uint64_t vmw_user_shader_size;

/* Conversion glue so generic code can map user handles to shader resources. */
static const struct vmw_user_resource_conv user_shader_conv = {
	.object_type = VMW_RES_SHADER,
	.base_obj_to_res = vmw_user_shader_base_to_res,
	.res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
	&user_shader_conv;

/* Resource-type callbacks for guest-backed shaders (MOB-backed, evictable). */
static const struct vmw_res_func vmw_gb_shader_func = {
	.res_type = vmw_res_shader,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed shaders",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_shader_create,
	.destroy = vmw_gb_shader_destroy,
	.bind = vmw_gb_shader_bind,
	.unbind = vmw_gb_shader_unbind
};
  110. /**
  111. * Shader management:
  112. */
  113. static inline struct vmw_shader *
  114. vmw_res_to_shader(struct vmw_resource *res)
  115. {
  116. return container_of(res, struct vmw_shader, res);
  117. }
/*
 * Hardware-destroy callback installed via vmw_resource_activate():
 * simply issues the GB shader destroy command, ignoring the result.
 */
static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
	(void) vmw_gb_shader_destroy(res);
}
/**
 * vmw_gb_shader_init - Initialize a guest-backed shader resource.
 *
 * @dev_priv: Device private.
 * @res: The shader resource to initialize.
 * @size: Shader bytecode size in bytes; also used as the backup buffer size.
 * @offset: Offset of the bytecode within @byte_code.
 * @type: SVGA3D shader type.
 * @byte_code: Optional DMA buffer holding the bytecode; referenced if
 * non-NULL.
 * @res_free: Destructor for @res, or NULL to use kfree().
 *
 * Note: on vmw_resource_init() failure, @res is freed here (via @res_free
 * or kfree()), so the caller must not touch it after a non-zero return.
 */
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
			      struct vmw_resource *res,
			      uint32_t size,
			      uint64_t offset,
			      SVGA3dShaderType type,
			      struct vmw_dma_buffer *byte_code,
			      void (*res_free) (struct vmw_resource *res))
{
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_shader_func);

	if (unlikely(ret != 0)) {
		/* Resource init failed: free @res ourselves. */
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	res->backup_size = size;
	if (byte_code) {
		res->backup = vmw_dmabuf_reference(byte_code);
		res->backup_offset = offset;
	}
	shader->size = size;
	shader->type = type;

	vmw_resource_activate(res, vmw_hw_shader_destroy);
	return 0;
}
/**
 * vmw_gb_shader_create - Allocate a device id and define the shader on the
 * device.
 *
 * @res: The shader resource.
 *
 * No-op if the resource already has an id. Emits a DEFINE_GB_SHADER fifo
 * command and bumps the 3D resource count on success.
 */
static int vmw_gb_shader_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBShader body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a shader id.\n");
		goto out_no_id;
	}

	/* The device supports only a limited number of GB shader ids. */
	if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.type = shader->type;
	cmd->body.sizeInBytes = shader->size;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
/**
 * vmw_gb_shader_bind - Bind the shader to its validated MOB backup buffer.
 *
 * @res: The shader resource.
 * @val_buf: Validation buffer; its bo must already be placed in MOB memory.
 *
 * Emits a BIND_GB_SHADER command pointing the device at the backup MOB and
 * clears the resource's backup_dirty flag.
 */
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.offsetInBytes = 0;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
/**
 * vmw_gb_shader_unbind - Unbind the shader from its MOB backup buffer.
 *
 * @res: The shader resource.
 * @readback: Unused here; shaders have no device-side dirty data to read
 * back.
 * @val_buf: Validation buffer whose bo is fenced after the unbind.
 *
 * Emits a BIND_GB_SHADER command with an invalid mob id, then fences the
 * backup buffer so it is not reused before the device is done with it.
 */
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct vmw_fence_obj *fence;

	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = SVGA3D_INVALID_ID;
	cmd->body.offsetInBytes = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
/**
 * vmw_gb_shader_destroy - Destroy the shader on the device.
 *
 * @res: The shader resource.
 *
 * No-op if the resource has no device id. Scrubs any context bindings of
 * the resource under the binding mutex before emitting the destroy command,
 * then releases the id and drops the 3D resource count.
 */
static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBShader body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_res_list_scrub(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}
  276. /**
  277. * User-space shader management:
  278. */
  279. static struct vmw_resource *
  280. vmw_user_shader_base_to_res(struct ttm_base_object *base)
  281. {
  282. return &(container_of(base, struct vmw_user_shader, base)->
  283. shader.res);
  284. }
/*
 * Resource destructor for user shaders: frees the containing
 * vmw_user_shader and returns its accounted size to the memory global.
 */
static void vmw_user_shader_free(struct vmw_resource *res)
{
	struct vmw_user_shader *ushader =
		container_of(res, struct vmw_user_shader, shader.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ushader, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_shader_size);
}
/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_resource *res = vmw_user_shader_base_to_res(base);

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
  305. int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
  306. struct drm_file *file_priv)
  307. {
  308. struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
  309. struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  310. return ttm_ref_object_base_unref(tfile, arg->handle,
  311. TTM_REF_USAGE);
  312. }
/**
 * vmw_shader_alloc - Allocate a user-space visible guest-backed shader.
 *
 * @dev_priv: Device private.
 * @buffer: Optional DMA buffer holding the bytecode.
 * @shader_size: Bytecode size in bytes.
 * @offset: Bytecode offset within @buffer.
 * @shader_type: SVGA3D shader type.
 * @tfile: Object file to register the base object with.
 * @handle: If non-NULL, receives the user-space handle on success.
 *
 * Accounts the object size against the graphics memory global before
 * allocating. After vmw_gb_shader_init() succeeds, freeing (including the
 * memory-global refund) is owned by vmw_user_shader_free().
 */
static int vmw_shader_alloc(struct vmw_private *dev_priv,
			    struct vmw_dma_buffer *buffer,
			    size_t shader_size,
			    size_t offset,
			    SVGA3dShaderType shader_type,
			    struct ttm_object_file *tfile,
			    u32 *handle)
{
	struct vmw_user_shader *ushader;
	struct vmw_resource *res, *tmp;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by maximum number_of shaders anyway.
	 */
	if (unlikely(vmw_user_shader_size == 0))
		vmw_user_shader_size =
			ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_shader_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader "
				  "creation.\n");
		goto out;
	}

	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
	if (unlikely(ushader == NULL)) {
		/* Refund the accounting made above. */
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_shader_size);
		ret = -ENOMEM;
		goto out;
	}

	res = &ushader->shader.res;
	ushader->base.shareable = false;
	ushader->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
				 offset, shader_type, buffer,
				 vmw_user_shader_free);
	if (unlikely(ret != 0))
		goto out;

	/* Extra reference held by the base object until its release. */
	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &ushader->base, false,
				   VMW_RES_SHADER,
				   &vmw_user_shader_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	if (handle)
		*handle = ushader->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out:
	return ret;
}
/**
 * vmw_shader_define_ioctl - Ioctl entry point for creating a user shader.
 *
 * @dev: DRM device.
 * @data: struct drm_vmw_shader_create_arg from user space.
 * @file_priv: DRM file private.
 *
 * Validates the optional backing buffer (size/offset must fit within it)
 * and the shader type, then allocates the shader under the read lock.
 */
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_shader_create_arg *arg =
		(struct drm_vmw_shader_create_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_dma_buffer *buffer = NULL;
	SVGA3dShaderType shader_type;
	int ret;

	if (arg->buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
					     &buffer);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find buffer for shader "
				  "creation.\n");
			return ret;
		}

		/* 64-bit math to avoid overflow on untrusted size/offset. */
		if ((u64)buffer->base.num_pages * PAGE_SIZE <
		    (u64)arg->size + (u64)arg->offset) {
			DRM_ERROR("Illegal buffer- or shader size.\n");
			ret = -EINVAL;
			goto out_bad_arg;
		}
	}

	switch (arg->shader_type) {
	case drm_vmw_shader_type_vs:
		shader_type = SVGA3D_SHADERTYPE_VS;
		break;
	case drm_vmw_shader_type_ps:
		shader_type = SVGA3D_SHADERTYPE_PS;
		break;
	case drm_vmw_shader_type_gs:
		shader_type = SVGA3D_SHADERTYPE_GS;
		break;
	default:
		DRM_ERROR("Illegal shader type.\n");
		ret = -EINVAL;
		goto out_bad_arg;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		goto out_bad_arg;

	ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
			       shader_type, tfile, &arg->shader_handle);

	ttm_read_unlock(&vmaster->lock);
out_bad_arg:
	/* NULL-safe: @buffer may be NULL when no buffer handle was given. */
	vmw_dmabuf_unreference(&buffer);
	return ret;
}
  424. /**
  425. * vmw_compat_shader_lookup - Look up a compat shader
  426. *
  427. * @man: Pointer to the compat shader manager.
  428. * @shader_type: The shader type, that combined with the user_key identifies
  429. * the shader.
  430. * @user_key: On entry, this should be a pointer to the user_key.
  431. * On successful exit, it will contain the guest-backed shader's TTM handle.
  432. *
  433. * Returns 0 on success. Non-zero on failure, in which case the value pointed
  434. * to by @user_key is unmodified.
  435. */
  436. int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
  437. SVGA3dShaderType shader_type,
  438. u32 *user_key)
  439. {
  440. struct drm_hash_item *hash;
  441. int ret;
  442. unsigned long key = *user_key | (shader_type << 24);
  443. ret = drm_ht_find_item(&man->shaders, key, &hash);
  444. if (unlikely(ret != 0))
  445. return ret;
  446. *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
  447. hash)->handle;
  448. return 0;
  449. }
/**
 * vmw_compat_shader_free - Free a compat shader.
 *
 * @man: Pointer to the compat shader manager.
 * @entry: Pointer to a struct vmw_compat_shader.
 *
 * Frees a struct vmw_compat_shader entry and drops its reference to the
 * guest backed shader.
 */
static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
				   struct vmw_compat_shader *entry)
{
	list_del(&entry->head);
	WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
	WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
					  TTM_REF_USAGE));
	kfree(entry);
}
/**
 * vmw_compat_shaders_commit - Commit a list of compat shader actions.
 *
 * @man: Pointer to the compat shader manager.
 * @list: Caller's list of compat shader actions.
 *
 * This function commits a list of compat shader additions or removals.
 * It is typically called when the execbuf ioctl call triggering these
 * actions has committed the fifo contents to the device.
 */
void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
			       struct list_head *list)
{
	struct vmw_compat_shader *entry, *next;

	list_for_each_entry_safe(entry, next, list, head) {
		list_del(&entry->head);
		switch (entry->state) {
		case VMW_COMPAT_ADD:
			/* Addition confirmed: move onto the manager's list. */
			entry->state = VMW_COMPAT_COMMITED;
			list_add_tail(&entry->head, &man->list);
			break;
		case VMW_COMPAT_DEL:
			/* Removal confirmed: drop the handle reference. */
			ttm_ref_object_base_unref(entry->tfile, entry->handle,
						  TTM_REF_USAGE);
			kfree(entry);
			break;
		default:
			BUG();
			break;
		}
	}
}
  500. /**
  501. * vmw_compat_shaders_revert - Revert a list of compat shader actions
  502. *
  503. * @man: Pointer to the compat shader manager.
  504. * @list: Caller's list of compat shader actions.
  505. *
  506. * This function reverts a list of compat shader additions or removals.
  507. * It is typically called when the execbuf ioctl call triggering these
  508. * actions failed for some reason, and the command stream was never
  509. * submitted.
  510. */
  511. void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
  512. struct list_head *list)
  513. {
  514. struct vmw_compat_shader *entry, *next;
  515. int ret;
  516. list_for_each_entry_safe(entry, next, list, head) {
  517. switch (entry->state) {
  518. case VMW_COMPAT_ADD:
  519. vmw_compat_shader_free(man, entry);
  520. break;
  521. case VMW_COMPAT_DEL:
  522. ret = drm_ht_insert_item(&man->shaders, &entry->hash);
  523. list_del(&entry->head);
  524. list_add_tail(&entry->head, &man->list);
  525. entry->state = VMW_COMPAT_COMMITED;
  526. break;
  527. default:
  528. BUG();
  529. break;
  530. }
  531. }
  532. }
/**
 * vmw_compat_shader_remove - Stage a compat shader for removal.
 *
 * @man: Pointer to the compat shader manager
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @shader_type: Shader type.
 * @list: Caller's list of staged shader actions.
 *
 * This function stages a compat shader for removal and removes the key from
 * the shader manager's hash table. If the shader was previously only staged
 * for addition it is completely removed (But the execbuf code may keep a
 * reference if it was bound to a context between addition and removal). If
 * it was previously committed to the manager, it is staged for removal.
 */
int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list)
{
	struct vmw_compat_shader *entry;
	struct drm_hash_item *hash;
	int ret;

	/* Lookup failure is mapped to -EINVAL for the caller. */
	ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
			       &hash);
	if (likely(ret != 0))
		return -EINVAL;

	entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);

	switch (entry->state) {
	case VMW_COMPAT_ADD:
		/* Never committed: undo the addition completely. */
		vmw_compat_shader_free(man, entry);
		break;
	case VMW_COMPAT_COMMITED:
		/* Hide the key now; finalize on commit, undo on revert. */
		(void) drm_ht_remove_item(&man->shaders, &entry->hash);
		list_del(&entry->head);
		entry->state = VMW_COMPAT_DEL;
		list_add_tail(&entry->head, list);
		break;
	default:
		BUG();
		break;
	}

	return 0;
}
  576. /**
  577. * vmw_compat_shader_add - Create a compat shader and add the
  578. * key to the manager
  579. *
  580. * @man: Pointer to the compat shader manager
  581. * @user_key: The key that is used to identify the shader. The key is
  582. * unique to the shader type.
  583. * @bytecode: Pointer to the bytecode of the shader.
  584. * @shader_type: Shader type.
  585. * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
  586. * to be created with.
  587. * @list: Caller's list of staged shader actions.
  588. *
  589. * Note that only the key is added to the shader manager's hash table.
  590. * The shader is not yet added to the shader manager's list of shaders.
  591. */
  592. int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
  593. u32 user_key, const void *bytecode,
  594. SVGA3dShaderType shader_type,
  595. size_t size,
  596. struct ttm_object_file *tfile,
  597. struct list_head *list)
  598. {
  599. struct vmw_dma_buffer *buf;
  600. struct ttm_bo_kmap_obj map;
  601. bool is_iomem;
  602. struct vmw_compat_shader *compat;
  603. u32 handle;
  604. int ret;
  605. if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
  606. return -EINVAL;
  607. /* Allocate and pin a DMA buffer */
  608. buf = kzalloc(sizeof(*buf), GFP_KERNEL);
  609. if (unlikely(buf == NULL))
  610. return -ENOMEM;
  611. ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
  612. true, vmw_dmabuf_bo_free);
  613. if (unlikely(ret != 0))
  614. goto out;
  615. ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
  616. if (unlikely(ret != 0))
  617. goto no_reserve;
  618. /* Map and copy shader bytecode. */
  619. ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
  620. &map);
  621. if (unlikely(ret != 0)) {
  622. ttm_bo_unreserve(&buf->base);
  623. goto no_reserve;
  624. }
  625. memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
  626. WARN_ON(is_iomem);
  627. ttm_bo_kunmap(&map);
  628. ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
  629. WARN_ON(ret != 0);
  630. ttm_bo_unreserve(&buf->base);
  631. /* Create a guest-backed shader container backed by the dma buffer */
  632. ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
  633. tfile, &handle);
  634. vmw_dmabuf_unreference(&buf);
  635. if (unlikely(ret != 0))
  636. goto no_reserve;
  637. /*
  638. * Create a compat shader structure and stage it for insertion
  639. * in the manager
  640. */
  641. compat = kzalloc(sizeof(*compat), GFP_KERNEL);
  642. if (compat == NULL)
  643. goto no_compat;
  644. compat->hash.key = user_key | (shader_type << 24);
  645. ret = drm_ht_insert_item(&man->shaders, &compat->hash);
  646. if (unlikely(ret != 0))
  647. goto out_invalid_key;
  648. compat->state = VMW_COMPAT_ADD;
  649. compat->handle = handle;
  650. compat->tfile = tfile;
  651. list_add_tail(&compat->head, list);
  652. return 0;
  653. out_invalid_key:
  654. kfree(compat);
  655. no_compat:
  656. ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
  657. no_reserve:
  658. out:
  659. return ret;
  660. }
  661. /**
  662. * vmw_compat_shader_man_create - Create a compat shader manager
  663. *
  664. * @dev_priv: Pointer to a device private structure.
  665. *
  666. * Typically done at file open time. If successful returns a pointer to a
  667. * compat shader manager. Otherwise returns an error pointer.
  668. */
  669. struct vmw_compat_shader_manager *
  670. vmw_compat_shader_man_create(struct vmw_private *dev_priv)
  671. {
  672. struct vmw_compat_shader_manager *man;
  673. int ret;
  674. man = kzalloc(sizeof(*man), GFP_KERNEL);
  675. if (man == NULL)
  676. return ERR_PTR(-ENOMEM);
  677. man->dev_priv = dev_priv;
  678. INIT_LIST_HEAD(&man->list);
  679. ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
  680. if (ret == 0)
  681. return man;
  682. kfree(man);
  683. return ERR_PTR(ret);
  684. }
/**
 * vmw_compat_shader_man_destroy - Destroy a compat shader manager
 *
 * @man: Pointer to the shader manager to destroy.
 *
 * Typically done at file close time. Frees all committed shaders under the
 * cmdbuf mutex, then the manager itself. Note: the hash table itself is
 * not torn down here with drm_ht_remove() — presumably freed elsewhere or
 * leaked; verify against drm_ht_create() usage.
 */
void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
{
	struct vmw_compat_shader *entry, *next;

	mutex_lock(&man->dev_priv->cmdbuf_mutex);
	list_for_each_entry_safe(entry, next, &man->list, head)
		vmw_compat_shader_free(man, entry);

	mutex_unlock(&man->dev_priv->cmdbuf_mutex);
	kfree(man);
}