vmwgfx_cmdbuf.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
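
/*
 * Note (not in the original source): with the alignment above, struct
 * vmw_cmdbuf_dheader below works out to 1024 bytes (the SVGACBHeader
 * rounded up to VMW_CMDBUF_INLINE_ALIGN, plus VMW_CMDBUF_INLINE_SIZE bytes
 * of inline space), so on a 4 KiB-page system the dheader DMA pool packs
 * exactly four entries per page, which appears to be the intent of the
 * comment above.
 */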

/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 * @block_submission: Whether to block further command submission on this
 * context.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
	bool block_submission;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex to serialize the work queue error handling.
 * Note this is not needed if the same workqueue handler
 * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 * @num_contexts: Number of contexts actually enabled.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct mutex error_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
	u32 num_contexts;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
	     ++(_i), ++(_ctx))

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptible when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	       !list_empty(&ctx->submitted) &&
	       !ctx->block_submission) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_del(&entry->list);
		list_add_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process: Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context still has command buffers on its
 * submitted queue after processing.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically freeing them, but on preemption or error take
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &ctx->preempted);
			break;
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			WARN_ONCE(true, "Command buffer header error.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy;
	bool restart[SVGA_CB_CONTEXT_MAX];
	bool send_fence = false;
	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
	int i;
	struct vmw_cmdbuf_context *ctx;
	bool global_block = false;

	for_each_cmdbuf_ctx(man, i, ctx) {
		INIT_LIST_HEAD(&restart_head[i]);
		restart[i] = false;
	}

	mutex_lock(&man->error_mutex);
	spin_lock(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		SVGACBHeader *cb_hdr = entry->cb_header;
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
			(entry->cmd + cb_hdr->errorOffset);
		u32 error_cmd_size, new_start_offset;
		const char *cmd_name;

		list_del_init(&entry->list);
		restart[entry->cb_context] = true;
		global_block = true;

		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
			DRM_ERROR("Unknown command causing device error.\n");
			DRM_ERROR("Command buffer offset is %lu\n",
				  (unsigned long) cb_hdr->errorOffset);
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
		DRM_ERROR("Command buffer offset is %lu\n",
			  (unsigned long) cb_hdr->errorOffset);
		DRM_ERROR("Command size is %lu\n",
			  (unsigned long) error_cmd_size);

		new_start_offset = cb_hdr->errorOffset + error_cmd_size;

		if (new_start_offset >= cb_hdr->length) {
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		if (man->using_mob)
			cb_hdr->ptr.mob.mobOffset += new_start_offset;
		else
			cb_hdr->ptr.pa += (u64) new_start_offset;

		entry->cmd += new_start_offset;
		cb_hdr->length -= new_start_offset;
		cb_hdr->errorOffset = 0;
		cb_hdr->offset = 0;

		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		man->ctx[i].block_submission = true;

	spin_unlock(&man->lock);

	/* Preempt all contexts */
	if (global_block && vmw_cmdbuf_preempt(man, 0))
		DRM_ERROR("Failed preempting command buffer contexts\n");

	spin_lock(&man->lock);
	for_each_cmdbuf_ctx(man, i, ctx) {
		/* Move preempted command buffers to the preempted queue. */
		vmw_cmdbuf_ctx_process(man, ctx, &dummy);

		/*
		 * Add the preempted queue after the command buffer
		 * that caused an error.
		 */
		list_splice_init(&ctx->preempted, restart_head[i].prev);

		/*
		 * Finally add all command buffers first in the submitted
		 * queue, to rerun them.
		 */
		ctx->block_submission = false;
		list_splice_init(&restart_head[i], &ctx->submitted);
	}

	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);

	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
		DRM_ERROR("Failed restarting command buffer contexts\n");

	/* Send a new fence in case one was removed */
	if (send_fence) {
		vmw_fifo_send_fence(man->dev_priv, &dummy);
		wake_up_all(&man->idle_queue);
	}

	mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	if (!cur)
		return;

	spin_lock(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptible while waiting.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptible while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}

	if (ret > 0)
		ret = 0;

	return ret;
}

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if succeeded.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock(&man->lock);
	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	}

	spin_unlock(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available ATM, it turns on IRQ handling and sleeps waiting for it to
 * become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time waiting for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

	if (ret)
		return ret;

	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
					    &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
				  &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}
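
/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * sends a command blob through its own dedicated header rather than the
 * manager's current buffer. "cmd_size" and "cmd_payload" are placeholder
 * names for whatever the caller actually emits.
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, cmd_size, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *
 *	cmd = vmw_cmdbuf_reserve(man, cmd_size, SVGA3D_INVALID_ID, true,
 *				 header);
 *	memcpy(cmd, cmd_payload, cmd_size);
 *	vmw_cmdbuf_commit(man, cmd_size, header, true);
 *
 * Once committed, the header is freed by the manager when the buffer
 * completes (see vmw_cmdbuf_ctx_process()); vmw_cmdbuf_header_free() is
 * only needed if the buffer is never committed.
 */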

/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;

	return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}
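
/*
 * Illustrative usage sketch (not part of the original file): reserving and
 * committing a small kernel command through the manager's current buffer
 * (header == NULL). "struct my_cmd" is a placeholder for the actual command
 * layout a caller would emit.
 *
 *	struct my_cmd *cmd;
 *
 *	cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd), SVGA3D_INVALID_ID,
 *				 true, NULL);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *
 *	// ... fill in *cmd ...
 *	vmw_cmdbuf_commit(man, sizeof(*cmd), NULL, false);
 *
 * The reserve takes @man->cur_mutex and the matching commit releases it, so
 * every successful reserve must be paired with a commit.
 */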

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to preempt.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
	struct {
		uint32 id;
		SVGADCCmdPreempt body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_PREEMPT;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
	cmd.body.ignoreIDZero = 0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start / stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions.
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
			     size_t size, size_t default_size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX))
			return -ENOMEM;

		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
				    &vmw_mob_ne_placement, 0, false,
				    &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submissions to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	DRM_INFO("Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob)
		ttm_bo_unref(&man->cmd_space);

	return ret;
}

/**
 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or error pointer
 * on failure. The command buffer manager will be enabled for submissions of
 * size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	unsigned int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
		2 : 1;
	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       &dev_priv->dev->pdev->dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					&dev_priv->dev->pdev->dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	mutex_init(&man->error_mutex);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, 0, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer contexts\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_unref(&man->cmd_space);
	} else {
		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
				  man->size, man->map, man->handle);
	}
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);

	if (vmw_cmdbuf_startstop(man, 0, false))
		DRM_ERROR("Failed stopping command buffer contexts.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	mutex_destroy(&man->error_mutex);
	kfree(man);
}