vmwgfx_surface.c

/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <ttm/ttm_placement.h>
#include "svga3d_surfacedefs.h"

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime: The TTM prime object handling user-space visibility.
 * @srf: The surface metadata.
 * @size: TTM accounting size for the surface.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face: Surface face.
 * @mip: Mip level.
 * @bo_offset: Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;
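
/* Lazily computed TTM accounting size of a struct vmw_user_surface. */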
static uint64_t vmw_user_surface_size;
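
/*
 * Two flavors of surface resource: legacy surfaces, whose storage is
 * managed by the host and which only need a guest backup buffer when
 * evicted (needs_backup = false), and guest-backed surfaces, which are
 * always backed by a mobility object (MOB) in guest memory.
 */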
static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind
};

/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};

/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}

/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}

/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}

/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: Use used_memory_size_atomic, or a separate lock,
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		srf = vmw_res_to_srf(res);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
	vmw_3d_resource_dec(dev_priv, false);
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * Creates a device surface if the surface doesn't already have a hw id.
 *
 * Returns -EBUSY if there wasn't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 * surface validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}

/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 * surface eviction process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @readback: Whether to copy the surface contents back to the backup
 * buffer before unbinding.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 * resource eviction process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		return -ENOMEM;
	}

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;
}

/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to the struct vmw_surface to initialize.
 * @res_free: Pointer to a resource destructor used to free
 * the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
	if (!dev_priv->has_mob)
		(void) vmw_3d_resource_inc(dev_priv, false);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		if (!dev_priv->has_mob)
			vmw_3d_resource_dec(dev_priv, false);
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 * user visible surfaces
 *
 * @base: Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
		container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 * destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
		container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 * the user surface destroy functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 0;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/*
	 * A gb-aware client referencing a shared surface will
	 * expect a backup buffer to be present.
	 */
	if (dev_priv->has_mob && req->shareable) {
		uint32_t backup_handle;

		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
					    res->backup_size,
					    true,
					    &backup_handle,
					    &res->backup);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->shareable, VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.hash.key;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
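
/**
 * vmw_surface_handle_reference - Look up and reference a surface handle
 *
 * @dev_priv: Pointer to a device private struct.
 * @file_priv: Pointer to a drm file private structure.
 * @u_handle: The user-space handle, or prime fd, identifying the surface.
 * @handle_type: Whether @u_handle is a legacy handle or a prime fd.
 * @base_p: On success, assigned a pointer to the referenced TTM base
 * object.
 *
 * Translates a prime fd to a handle if needed, looks up the base object,
 * verifies that it is a surface and, for legacy handles, adds a
 * TTM_REF_USAGE reference for the calling file. Render clients are
 * refused legacy references.
 */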
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		if (unlikely(ret != 0))
			return ret;
	} else {
		if (unlikely(drm_is_render_client(file_priv))) {
			DRM_ERROR("Render client refused legacy "
				  "surface reference.\n");
			return -EACCES;
		}
		handle = u_handle;
	}

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		DRM_ERROR("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}

	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);

	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->base_size,
				   sizeof(srf->base_size));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_gb_surface_create - Define a guest-backed device surface as part
 * of the resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint32_t cmd_len, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	(void) vmw_3d_resource_inc(dev_priv, false);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd_len = sizeof(cmd->body);
	submit_len = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, submit_len);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	cmd->body.numMipLevels = srf->mip_levels[0];
	cmd->body.multisampleCount = srf->multisample_count;
	cmd->body.autogenFilter = srf->autogen_filter;
	cmd->body.size.width = srf->base_size.width;
	cmd->body.size.height = srf->base_size.height;
	cmd->body.size.depth = srf->base_size.depth;
	vmw_fifo_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_3d_resource_dec(dev_priv, false);
	return ret;
}
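
/**
 * vmw_gb_surface_bind - Bind a guest-backed surface to its backup MOB,
 * and if the backup is dirty, issue an update from the backup contents.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 */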
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

	cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd1 == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->mem.start;
	if (res->backup_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		res->backup_dirty = false;
	}
	vmw_fifo_commit(dev_priv, submit_size);

	return 0;
}
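
/**
 * vmw_gb_surface_unbind - Unbind a guest-backed surface from its backup
 * MOB, optionally reading the surface contents back first.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @readback: Whether to issue a readback before invalidating the surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 */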
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "unbinding.\n");
		return -ENOMEM;
	}

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
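
/**
 * vmw_gb_surface_destroy - Destroy a guest-backed device surface,
 * scrubbing any context bindings and releasing the surface id.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 */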
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_res_list_scrub(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_gb_surface_create_arg *arg =
	    (union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint32_t size;
	const struct svga3d_surface_desc *desc;
	uint32_t backup_handle;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	size = vmw_user_surface_size + 128;

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->svga3d_flags;
	srf->format = req->format;
	srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
	srf->mip_levels[0] = req->mip_levels;
	srf->num_sizes = 1;
	srf->sizes = NULL;
	srf->offsets = NULL;
	user_srf->size = size;
	srf->base_size = req->base_size;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = req->multisample_count;
	res->backup_size = svga3dsurface_get_serialized_size
		(srf->format, srf->base_size, srf->mip_levels[0],
		 srf->flags & SVGA3D_SURFACE_CUBEMAP);

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	if (req->buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
					     &res->backup);
		/* Report the existing buffer's handle back to user-space. */
		if (ret == 0)
			backup_handle = req->buffer_handle;
	} else if (req->drm_surface_flags &
		   drm_vmw_surface_flag_create_buffer)
		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
					    res->backup_size,
					    req->drm_surface_flags &
					    drm_vmw_surface_flag_shareable,
					    &backup_handle,
					    &res->backup);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->drm_surface_flags &
				    drm_vmw_surface_flag_shareable,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle = user_srf->prime.base.hash.key;
	rep->backup_size = res->backup_size;
	if (res->backup) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->backup->base.vma_node);
		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
		rep->buffer_handle = backup_handle;
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}

	vmw_resource_unreference(&res);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return 0;

out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	uint32_t backup_handle;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	if (srf->res.backup == NULL) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		ret = -EINVAL;
		goto out_bad_resource;
	}

	mutex_lock(&dev_priv->cmdbuf_mutex);	/* Protect res->backup */
	ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
					&backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a GB surface "
			  "backup buffer.\n");
		(void) ttm_ref_object_base_unref(tfile, base->hash.key,
						 TTM_REF_USAGE);
		goto out_bad_resource;
	}

	rep->creq.svga3d_flags = srf->flags;
	rep->creq.format = srf->format;
	rep->creq.mip_levels = srf->mip_levels[0];
	rep->creq.drm_surface_flags = 0;
	rep->creq.multisample_count = srf->multisample_count;
	rep->creq.autogen_filter = srf->autogen_filter;
	rep->creq.buffer_handle = backup_handle;
	rep->creq.base_size = srf->base_size;
	rep->crep.handle = user_srf->prime.base.hash.key;
	rep->crep.backup_size = srf->res.backup_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}