1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920 |
- /**************************************************************************
- *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
- #include "vmwgfx_drv.h"
- #include "vmwgfx_reg.h"
- #include <drm/ttm/ttm_bo_api.h>
- #include <drm/ttm/ttm_placement.h>
- #define VMW_RES_HT_ORDER 12
- /**
- * struct vmw_resource_relocation - Relocation info for resources
- *
- * @head: List head for the software context's relocation list.
- * @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of 4 byte entries into the command buffer where the
- * id that needs fixup is located.
- */
- struct vmw_resource_relocation {
- struct list_head head;
- const struct vmw_resource *res;
- unsigned long offset;
- };
- /**
- * struct vmw_resource_val_node - Validation info for resources
- *
- * @head: List head for the software context's resource list.
- * @hash: Hash entry for quick resouce to val_node lookup.
- * @res: Ref-counted pointer to the resource.
- * @switch_backup: Boolean whether to switch backup buffer on unreserve.
- * @new_backup: Refcounted pointer to the new backup buffer.
- * @staged_bindings: If @res is a context, tracks bindings set up during
- * the command batch. Otherwise NULL.
- * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
- * @first_usage: Set to true the first time the resource is referenced in
- * the command stream.
- * @no_buffer_needed: Resources do not need to allocate buffer backup on
- * reservation. The command stream will provide one.
- */
- struct vmw_resource_val_node {
- struct list_head head;
- struct drm_hash_item hash;
- struct vmw_resource *res;
- struct vmw_dma_buffer *new_backup;
- struct vmw_ctx_binding_state *staged_bindings;
- unsigned long new_backup_offset;
- bool first_usage;
- bool no_buffer_needed;
- };
- /**
- * struct vmw_cmd_entry - Describe a command for the verifier
- *
- * @user_allow: Whether allowed from the execbuf ioctl.
- * @gb_disable: Whether disabled if guest-backed objects are available.
- * @gb_enable: Whether enabled iff guest-backed objects are available.
- */
- struct vmw_cmd_entry {
- int (*func) (struct vmw_private *, struct vmw_sw_context *,
- SVGA3dCmdHeader *);
- bool user_allow;
- bool gb_disable;
- bool gb_enable;
- };
- #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
- [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
- (_gb_disable), (_gb_enable)}
- /**
- * vmw_resource_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @list_head: list of resources to unreserve.
- * @backoff: Whether command submission failed.
- */
- static void vmw_resource_list_unreserve(struct list_head *list,
- bool backoff)
- {
- struct vmw_resource_val_node *val;
- list_for_each_entry(val, list, head) {
- struct vmw_resource *res = val->res;
- struct vmw_dma_buffer *new_backup =
- backoff ? NULL : val->new_backup;
- /*
- * Transfer staged context bindings to the
- * persistent context binding tracker.
- */
- if (unlikely(val->staged_bindings)) {
- if (!backoff) {
- vmw_context_binding_state_transfer
- (val->res, val->staged_bindings);
- }
- kfree(val->staged_bindings);
- val->staged_bindings = NULL;
- }
- vmw_resource_unreserve(res, new_backup,
- val->new_backup_offset);
- vmw_dmabuf_unreference(&val->new_backup);
- }
- }
- /**
- * vmw_resource_val_add - Add a resource to the software context's
- * resource list if it's not already on it.
- *
- * @sw_context: Pointer to the software context.
- * @res: Pointer to the resource.
- * @p_node On successful return points to a valid pointer to a
- * struct vmw_resource_val_node, if non-NULL on entry.
- */
- static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
- struct vmw_resource *res,
- struct vmw_resource_val_node **p_node)
- {
- struct vmw_resource_val_node *node;
- struct drm_hash_item *hash;
- int ret;
- if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
- &hash) == 0)) {
- node = container_of(hash, struct vmw_resource_val_node, hash);
- node->first_usage = false;
- if (unlikely(p_node != NULL))
- *p_node = node;
- return 0;
- }
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(node == NULL)) {
- DRM_ERROR("Failed to allocate a resource validation "
- "entry.\n");
- return -ENOMEM;
- }
- node->hash.key = (unsigned long) res;
- ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to initialize a resource validation "
- "entry.\n");
- kfree(node);
- return ret;
- }
- list_add_tail(&node->head, &sw_context->resource_list);
- node->res = vmw_resource_reference(res);
- node->first_usage = true;
- if (unlikely(p_node != NULL))
- *p_node = node;
- return 0;
- }
- /**
- * vmw_resource_context_res_add - Put resources previously bound to a context on
- * the validation list
- *
- * @dev_priv: Pointer to a device private structure
- * @sw_context: Pointer to a software context used for this command submission
- * @ctx: Pointer to the context resource
- *
- * This function puts all resources that were previously bound to @ctx on
- * the resource validation list. This is part of the context state reemission
- */
- static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- struct vmw_resource *ctx)
- {
- struct list_head *binding_list;
- struct vmw_ctx_binding *entry;
- int ret = 0;
- struct vmw_resource *res;
- mutex_lock(&dev_priv->binding_mutex);
- binding_list = vmw_context_binding_list(ctx);
- list_for_each_entry(entry, binding_list, ctx_list) {
- res = vmw_resource_reference_unless_doomed(entry->bi.res);
- if (unlikely(res == NULL))
- continue;
- ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
- vmw_resource_unreference(&res);
- if (unlikely(ret != 0))
- break;
- }
- mutex_unlock(&dev_priv->binding_mutex);
- return ret;
- }
- /**
- * vmw_resource_relocation_add - Add a relocation to the relocation list
- *
- * @list: Pointer to head of relocation list.
- * @res: The resource.
- * @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is 4 bytes.
- */
- static int vmw_resource_relocation_add(struct list_head *list,
- const struct vmw_resource *res,
- unsigned long offset)
- {
- struct vmw_resource_relocation *rel;
- rel = kmalloc(sizeof(*rel), GFP_KERNEL);
- if (unlikely(rel == NULL)) {
- DRM_ERROR("Failed to allocate a resource relocation.\n");
- return -ENOMEM;
- }
- rel->res = res;
- rel->offset = offset;
- list_add_tail(&rel->head, list);
- return 0;
- }
- /**
- * vmw_resource_relocations_free - Free all relocations on a list
- *
- * @list: Pointer to the head of the relocation list.
- */
- static void vmw_resource_relocations_free(struct list_head *list)
- {
- struct vmw_resource_relocation *rel, *n;
- list_for_each_entry_safe(rel, n, list, head) {
- list_del(&rel->head);
- kfree(rel);
- }
- }
- /**
- * vmw_resource_relocations_apply - Apply all relocations on a list
- *
- * @cb: Pointer to the start of the command buffer bein patch. This need
- * not be the same buffer as the one being parsed when the relocation
- * list was built, but the contents must be the same modulo the
- * resource ids.
- * @list: Pointer to the head of the relocation list.
- */
- static void vmw_resource_relocations_apply(uint32_t *cb,
- struct list_head *list)
- {
- struct vmw_resource_relocation *rel;
- list_for_each_entry(rel, list, head) {
- if (likely(rel->res != NULL))
- cb[rel->offset] = rel->res->id;
- else
- cb[rel->offset] = SVGA_3D_CMD_NOP;
- }
- }
- static int vmw_cmd_invalid(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- return capable(CAP_SYS_ADMIN) ? : -EINVAL;
- }
- static int vmw_cmd_ok(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- return 0;
- }
- /**
- * vmw_bo_to_validate_list - add a bo to a validate list
- *
- * @sw_context: The software context used for this command submission batch.
- * @bo: The buffer object to add.
- * @validate_as_mob: Validate this buffer as a MOB.
- * @p_val_node: If non-NULL Will be updated with the validate node number
- * on return.
- *
- * Returns -EINVAL if the limit of number of buffer objects per command
- * submission is reached.
- */
- static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_dma_buffer *vbo,
- bool validate_as_mob,
- uint32_t *p_val_node)
- {
- uint32_t val_node;
- struct vmw_validate_buffer *vval_buf;
- struct ttm_validate_buffer *val_buf;
- struct drm_hash_item *hash;
- int ret;
- if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
- &hash) == 0)) {
- vval_buf = container_of(hash, struct vmw_validate_buffer,
- hash);
- if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
- DRM_ERROR("Inconsistent buffer usage.\n");
- return -EINVAL;
- }
- val_buf = &vval_buf->base;
- val_node = vval_buf - sw_context->val_bufs;
- } else {
- val_node = sw_context->cur_val_buf;
- if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
- DRM_ERROR("Max number of DMA buffers per submission "
- "exceeded.\n");
- return -EINVAL;
- }
- vval_buf = &sw_context->val_bufs[val_node];
- vval_buf->hash.key = (unsigned long) vbo;
- ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to initialize a buffer validation "
- "entry.\n");
- return ret;
- }
- ++sw_context->cur_val_buf;
- val_buf = &vval_buf->base;
- val_buf->bo = ttm_bo_reference(&vbo->base);
- val_buf->shared = false;
- list_add_tail(&val_buf->head, &sw_context->validate_nodes);
- vval_buf->validate_as_mob = validate_as_mob;
- }
- if (p_val_node)
- *p_val_node = val_node;
- return 0;
- }
- /**
- * vmw_resources_reserve - Reserve all resources on the sw_context's
- * resource list.
- *
- * @sw_context: Pointer to the software context.
- *
- * Note that since vmware's command submission currently is protected by
- * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
- * since only a single thread at once will attempt this.
- */
- static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
- {
- struct vmw_resource_val_node *val;
- int ret;
- list_for_each_entry(val, &sw_context->resource_list, head) {
- struct vmw_resource *res = val->res;
- ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
- if (unlikely(ret != 0))
- return ret;
- if (res->backup) {
- struct vmw_dma_buffer *vbo = res->backup;
- ret = vmw_bo_to_validate_list
- (sw_context, vbo,
- vmw_resource_needs_backup(res), NULL);
- if (unlikely(ret != 0))
- return ret;
- }
- }
- return 0;
- }
- /**
- * vmw_resources_validate - Validate all resources on the sw_context's
- * resource list.
- *
- * @sw_context: Pointer to the software context.
- *
- * Before this function is called, all resource backup buffers must have
- * been validated.
- */
- static int vmw_resources_validate(struct vmw_sw_context *sw_context)
- {
- struct vmw_resource_val_node *val;
- int ret;
- list_for_each_entry(val, &sw_context->resource_list, head) {
- struct vmw_resource *res = val->res;
- ret = vmw_resource_validate(res);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to validate resource.\n");
- return ret;
- }
- }
- return 0;
- }
- /**
- * vmw_cmd_res_reloc_add - Add a resource to a software context's
- * relocation- and validation lists.
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @sw_context: Pointer to the software context.
- * @res_type: Resource type.
- * @id_loc: Pointer to where the id that needs translation is located.
- * @res: Valid pointer to a struct vmw_resource.
- * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
- * used for this resource is returned here.
- */
- static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- uint32_t *id_loc,
- struct vmw_resource *res,
- struct vmw_resource_val_node **p_val)
- {
- int ret;
- struct vmw_resource_val_node *node;
- *p_val = NULL;
- ret = vmw_resource_relocation_add(&sw_context->res_relocations,
- res,
- id_loc - sw_context->buf_start);
- if (unlikely(ret != 0))
- return ret;
- ret = vmw_resource_val_add(sw_context, res, &node);
- if (unlikely(ret != 0))
- return ret;
- if (res_type == vmw_res_context && dev_priv->has_mob &&
- node->first_usage) {
- /*
- * Put contexts first on the list to be able to exit
- * list traversal for contexts early.
- */
- list_del(&node->head);
- list_add(&node->head, &sw_context->resource_list);
- ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
- if (unlikely(ret != 0))
- return ret;
- node->staged_bindings =
- kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
- if (node->staged_bindings == NULL) {
- DRM_ERROR("Failed to allocate context binding "
- "information.\n");
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&node->staged_bindings->list);
- }
- if (p_val)
- *p_val = node;
- return 0;
- }
- /**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
- * on the resource validate list unless it's already there.
- *
- * @dev_priv: Pointer to a device private structure.
- * @sw_context: Pointer to the software context.
- * @res_type: Resource type.
- * @converter: User-space visisble type specific information.
- * @id_loc: Pointer to the location in the command buffer currently being
- * parsed from where the user-space resource id handle is located.
- * @p_val: Pointer to pointer to resource validalidation node. Populated
- * on exit.
- */
- static int
- vmw_cmd_res_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv *converter,
- uint32_t *id_loc,
- struct vmw_resource_val_node **p_val)
- {
- struct vmw_res_cache_entry *rcache =
- &sw_context->res_cache[res_type];
- struct vmw_resource *res;
- struct vmw_resource_val_node *node;
- int ret;
- if (*id_loc == SVGA3D_INVALID_ID) {
- if (p_val)
- *p_val = NULL;
- if (res_type == vmw_res_context) {
- DRM_ERROR("Illegal context invalid id.\n");
- return -EINVAL;
- }
- return 0;
- }
- /*
- * Fastpath in case of repeated commands referencing the same
- * resource
- */
- if (likely(rcache->valid && *id_loc == rcache->handle)) {
- const struct vmw_resource *res = rcache->res;
- rcache->node->first_usage = false;
- if (p_val)
- *p_val = rcache->node;
- return vmw_resource_relocation_add
- (&sw_context->res_relocations, res,
- id_loc - sw_context->buf_start);
- }
- ret = vmw_user_resource_lookup_handle(dev_priv,
- sw_context->fp->tfile,
- *id_loc,
- converter,
- &res);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned) *id_loc);
- dump_stack();
- return ret;
- }
- rcache->valid = true;
- rcache->res = res;
- rcache->handle = *id_loc;
- ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
- res, &node);
- if (unlikely(ret != 0))
- goto out_no_reloc;
- rcache->node = node;
- if (p_val)
- *p_val = node;
- vmw_resource_unreference(&res);
- return 0;
- out_no_reloc:
- BUG_ON(sw_context->error_resource != NULL);
- sw_context->error_resource = res;
- return ret;
- }
- /**
- * vmw_rebind_contexts - Rebind all resources previously bound to
- * referenced contexts.
- *
- * @sw_context: Pointer to the software context.
- *
- * Rebind context binding points that have been scrubbed because of eviction.
- */
- static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
- {
- struct vmw_resource_val_node *val;
- int ret;
- list_for_each_entry(val, &sw_context->resource_list, head) {
- if (unlikely(!val->staged_bindings))
- break;
- ret = vmw_context_rebind_all(val->res);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to rebind context.\n");
- return ret;
- }
- }
- return 0;
- }
- /**
- * vmw_cmd_cid_check - Check a command header for valid context information.
- *
- * @dev_priv: Pointer to a device private structure.
- * @sw_context: Pointer to the software context.
- * @header: A command header with an embedded user-space context handle.
- *
- * Convenience function: Call vmw_cmd_res_check with the user-space context
- * handle embedded in @header.
- */
- static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_cid_cmd {
- SVGA3dCmdHeader header;
- uint32_t cid;
- } *cmd;
- cmd = container_of(header, struct vmw_cid_cmd, header);
- return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->cid, NULL);
- }
- static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetRenderTarget body;
- } *cmd;
- struct vmw_resource_val_node *ctx_node;
- struct vmw_resource_val_node *res_node;
- int ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx_node);
- if (unlikely(ret != 0))
- return ret;
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.target.sid, &res_node);
- if (unlikely(ret != 0))
- return ret;
- if (dev_priv->has_mob) {
- struct vmw_ctx_bindinfo bi;
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_rt;
- bi.i1.rt_type = cmd->body.type;
- return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
- }
- return 0;
- }
- static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceCopy body;
- } *cmd;
- int ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.src.sid, NULL);
- if (ret)
- return ret;
- return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.dest.sid, NULL);
- }
- static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceStretchBlt body;
- } *cmd;
- int ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.src.sid, NULL);
- if (unlikely(ret != 0))
- return ret;
- return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.dest.sid, NULL);
- }
- static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBlitSurfaceToScreen body;
- } *cmd;
- cmd = container_of(header, struct vmw_sid_cmd, header);
- return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.srcImage.sid, NULL);
- }
- static int vmw_cmd_present_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdPresent body;
- } *cmd;
- cmd = container_of(header, struct vmw_sid_cmd, header);
- return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter, &cmd->body.sid,
- NULL);
- }
- /**
- * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
- *
- * @dev_priv: The device private structure.
- * @new_query_bo: The new buffer holding query results.
- * @sw_context: The software context used for this command submission.
- *
- * This function checks whether @new_query_bo is suitable for holding
- * query results, and if another buffer currently is pinned for query
- * results. If so, the function prepares the state of @sw_context for
- * switching pinned buffers after successful submission of the current
- * command batch.
- */
- static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *new_query_bo,
- struct vmw_sw_context *sw_context)
- {
- struct vmw_res_cache_entry *ctx_entry =
- &sw_context->res_cache[vmw_res_context];
- int ret;
- BUG_ON(!ctx_entry->valid);
- sw_context->last_query_ctx = ctx_entry->res;
- if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
- if (unlikely(new_query_bo->base.num_pages > 4)) {
- DRM_ERROR("Query buffer too large.\n");
- return -EINVAL;
- }
- if (unlikely(sw_context->cur_query_bo != NULL)) {
- sw_context->needs_post_query_barrier = true;
- ret = vmw_bo_to_validate_list(sw_context,
- sw_context->cur_query_bo,
- dev_priv->has_mob, NULL);
- if (unlikely(ret != 0))
- return ret;
- }
- sw_context->cur_query_bo = new_query_bo;
- ret = vmw_bo_to_validate_list(sw_context,
- dev_priv->dummy_query_bo,
- dev_priv->has_mob, NULL);
- if (unlikely(ret != 0))
- return ret;
- }
- return 0;
- }
- /**
- * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
- *
- * @dev_priv: The device private structure.
- * @sw_context: The software context used for this command submission batch.
- *
- * This function will check if we're switching query buffers, and will then,
- * issue a dummy occlusion query wait used as a query barrier. When the fence
- * object following that query wait has signaled, we are sure that all
- * preceding queries have finished, and the old query buffer can be unpinned.
- * However, since both the new query buffer and the old one are fenced with
- * that fence, we can do an asynchronus unpin now, and be sure that the
- * old query buffer won't be moved until the fence has signaled.
- *
- * As mentioned above, both the new - and old query buffers need to be fenced
- * using a sequence emitted *after* calling this function.
- */
- static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context)
- {
- /*
- * The validate list should still hold references to all
- * contexts here.
- */
- if (sw_context->needs_post_query_barrier) {
- struct vmw_res_cache_entry *ctx_entry =
- &sw_context->res_cache[vmw_res_context];
- struct vmw_resource *ctx;
- int ret;
- BUG_ON(!ctx_entry->valid);
- ctx = ctx_entry->res;
- ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
- if (unlikely(ret != 0))
- DRM_ERROR("Out of fifo space for dummy query.\n");
- }
- if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
- if (dev_priv->pinned_bo) {
- vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
- vmw_dmabuf_unreference(&dev_priv->pinned_bo);
- }
- if (!sw_context->needs_post_query_barrier) {
- vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
- /*
- * We pin also the dummy_query_bo buffer so that we
- * don't need to validate it when emitting
- * dummy queries in context destroy paths.
- */
- if (!dev_priv->dummy_query_bo_pinned) {
- vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
- true);
- dev_priv->dummy_query_bo_pinned = true;
- }
- BUG_ON(sw_context->last_query_ctx == NULL);
- dev_priv->query_cid = sw_context->last_query_ctx->id;
- dev_priv->query_cid_valid = true;
- dev_priv->pinned_bo =
- vmw_dmabuf_reference(sw_context->cur_query_bo);
- }
- }
- }
- /**
- * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
- * handle to a MOB id.
- *
- * @dev_priv: Pointer to a device private structure.
- * @sw_context: The software context used for this command batch validation.
- * @id: Pointer to the user-space handle to be translated.
- * @vmw_bo_p: Points to a location that, on successful return will carry
- * a reference-counted pointer to the DMA buffer identified by the
- * user-space handle in @id.
- *
- * This function saves information needed to translate a user-space buffer
- * handle to a MOB id. The translation does not take place immediately, but
- * during a call to vmw_apply_relocations(). This function builds a relocation
- * list and a list of buffers to validate. The former needs to be freed using
- * either vmw_apply_relocations() or vmw_free_relocations(). The latter
- * needs to be freed using vmw_clear_validations.
- */
- static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGAMobId *id,
- struct vmw_dma_buffer **vmw_bo_p)
- {
- struct vmw_dma_buffer *vmw_bo = NULL;
- uint32_t handle = *id;
- struct vmw_relocation *reloc;
- int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not find or use MOB buffer.\n");
- ret = -EINVAL;
- goto out_no_reloc;
- }
- if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
- DRM_ERROR("Max number relocations per submission"
- " exceeded\n");
- ret = -EINVAL;
- goto out_no_reloc;
- }
- reloc = &sw_context->relocs[sw_context->cur_reloc++];
- reloc->mob_loc = id;
- reloc->location = NULL;
- ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
- if (unlikely(ret != 0))
- goto out_no_reloc;
- *vmw_bo_p = vmw_bo;
- return 0;
- out_no_reloc:
- vmw_dmabuf_unreference(&vmw_bo);
- *vmw_bo_p = NULL;
- return ret;
- }
- /**
- * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
- * handle to a valid SVGAGuestPtr
- *
- * @dev_priv: Pointer to a device private structure.
- * @sw_context: The software context used for this command batch validation.
- * @ptr: Pointer to the user-space handle to be translated.
- * @vmw_bo_p: Points to a location that, on successful return will carry
- * a reference-counted pointer to the DMA buffer identified by the
- * user-space handle in @id.
- *
- * This function saves information needed to translate a user-space buffer
- * handle to a valid SVGAGuestPtr. The translation does not take place
- * immediately, but during a call to vmw_apply_relocations().
- * This function builds a relocation list and a list of buffers to validate.
- * The former needs to be freed using either vmw_apply_relocations() or
- * vmw_free_relocations(). The latter needs to be freed using
- * vmw_clear_validations.
- */
- static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGAGuestPtr *ptr,
- struct vmw_dma_buffer **vmw_bo_p)
- {
- struct vmw_dma_buffer *vmw_bo = NULL;
- uint32_t handle = ptr->gmrId;
- struct vmw_relocation *reloc;
- int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not find or use GMR region.\n");
- ret = -EINVAL;
- goto out_no_reloc;
- }
- if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
- DRM_ERROR("Max number relocations per submission"
- " exceeded\n");
- ret = -EINVAL;
- goto out_no_reloc;
- }
- reloc = &sw_context->relocs[sw_context->cur_reloc++];
- reloc->location = ptr;
- ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
- if (unlikely(ret != 0))
- goto out_no_reloc;
- *vmw_bo_p = vmw_bo;
- return 0;
- out_no_reloc:
- vmw_dmabuf_unreference(&vmw_bo);
- *vmw_bo_p = NULL;
- return ret;
- }
- /**
- * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context used for this command submission.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_begin_gb_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBeginGBQuery q;
- } *cmd;
- cmd = container_of(header, struct vmw_begin_gb_query_cmd,
- header);
- return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->q.cid,
- NULL);
- }
- /**
- * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context used for this command submission.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_begin_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBeginQuery q;
- } *cmd;
- cmd = container_of(header, struct vmw_begin_query_cmd,
- header);
- if (unlikely(dev_priv->has_mob)) {
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdBeginGBQuery q;
- } gb_cmd;
- BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
- gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
- gb_cmd.header.size = cmd->header.size;
- gb_cmd.q.cid = cmd->q.cid;
- gb_cmd.q.type = cmd->q.type;
- memcpy(cmd, &gb_cmd, sizeof(*cmd));
- return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
- }
- return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->q.cid,
- NULL);
- }
- /**
- * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context used for this command submission.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_dma_buffer *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdEndGBQuery q;
- } *cmd;
- int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
- if (unlikely(ret != 0))
- return ret;
- ret = vmw_translate_mob_ptr(dev_priv, sw_context,
- &cmd->q.mobid,
- &vmw_bo);
- if (unlikely(ret != 0))
- return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
- vmw_dmabuf_unreference(&vmw_bo);
- return ret;
- }
- /**
- * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context used for this command submission.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_end_query(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_dma_buffer *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdEndQuery q;
- } *cmd;
- int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
- if (dev_priv->has_mob) {
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdEndGBQuery q;
- } gb_cmd;
- BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
- gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
- gb_cmd.header.size = cmd->header.size;
- gb_cmd.q.cid = cmd->q.cid;
- gb_cmd.q.type = cmd->q.type;
- gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
- gb_cmd.q.offset = cmd->q.guestResult.offset;
- memcpy(cmd, &gb_cmd, sizeof(*cmd));
- return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
- }
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
- if (unlikely(ret != 0))
- return ret;
- ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->q.guestResult,
- &vmw_bo);
- if (unlikely(ret != 0))
- return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
- vmw_dmabuf_unreference(&vmw_bo);
- return ret;
- }
- /**
- * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context used for this command submission.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_dma_buffer *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdWaitForGBQuery q;
- } *cmd;
- int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
- if (unlikely(ret != 0))
- return ret;
- ret = vmw_translate_mob_ptr(dev_priv, sw_context,
- &cmd->q.mobid,
- &vmw_bo);
- if (unlikely(ret != 0))
- return ret;
- vmw_dmabuf_unreference(&vmw_bo);
- return 0;
- }
- /**
- * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context used for this command submission.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_dma_buffer *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdWaitForQuery q;
- } *cmd;
- int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
- if (dev_priv->has_mob) {
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdWaitForGBQuery q;
- } gb_cmd;
- BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
- gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
- gb_cmd.header.size = cmd->header.size;
- gb_cmd.q.cid = cmd->q.cid;
- gb_cmd.q.type = cmd->q.type;
- gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
- gb_cmd.q.offset = cmd->q.guestResult.offset;
- memcpy(cmd, &gb_cmd, sizeof(*cmd));
- return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
- }
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
- if (unlikely(ret != 0))
- return ret;
- ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->q.guestResult,
- &vmw_bo);
- if (unlikely(ret != 0))
- return ret;
- vmw_dmabuf_unreference(&vmw_bo);
- return 0;
- }
- static int vmw_cmd_dma(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_dma_buffer *vmw_bo = NULL;
- struct vmw_surface *srf = NULL;
- struct vmw_dma_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA dma;
- } *cmd;
- int ret;
- SVGA3dCmdSurfaceDMASuffix *suffix;
- uint32_t bo_size;
- cmd = container_of(header, struct vmw_dma_cmd, header);
- suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
- header->size - sizeof(*suffix));
- /* Make sure device and verifier stays in sync. */
- if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
- DRM_ERROR("Invalid DMA suffix size.\n");
- return -EINVAL;
- }
- ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->dma.guest.ptr,
- &vmw_bo);
- if (unlikely(ret != 0))
- return ret;
- /* Make sure DMA doesn't cross BO boundaries. */
- bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
- if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
- DRM_ERROR("Invalid DMA offset.\n");
- return -EINVAL;
- }
- bo_size -= cmd->dma.guest.ptr.offset;
- if (unlikely(suffix->maximumOffset > bo_size))
- suffix->maximumOffset = bo_size;
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter, &cmd->dma.host.sid,
- NULL);
- if (unlikely(ret != 0)) {
- if (unlikely(ret != -ERESTARTSYS))
- DRM_ERROR("could not find surface for DMA.\n");
- goto out_no_surface;
- }
- srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
- header);
- out_no_surface:
- vmw_dmabuf_unreference(&vmw_bo);
- return ret;
- }
- static int vmw_cmd_draw(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_draw_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDrawPrimitives body;
- } *cmd;
- SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
- (unsigned long)header + sizeof(*cmd));
- SVGA3dPrimitiveRange *range;
- uint32_t i;
- uint32_t maxnum;
- int ret;
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
- if (unlikely(ret != 0))
- return ret;
- cmd = container_of(header, struct vmw_draw_cmd, header);
- maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
- if (unlikely(cmd->body.numVertexDecls > maxnum)) {
- DRM_ERROR("Illegal number of vertex declarations.\n");
- return -EINVAL;
- }
- for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &decl->array.surfaceId, NULL);
- if (unlikely(ret != 0))
- return ret;
- }
- maxnum = (header->size - sizeof(cmd->body) -
- cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
- if (unlikely(cmd->body.numRanges > maxnum)) {
- DRM_ERROR("Illegal number of index ranges.\n");
- return -EINVAL;
- }
- range = (SVGA3dPrimitiveRange *) decl;
- for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &range->indexArray.surfaceId, NULL);
- if (unlikely(ret != 0))
- return ret;
- }
- return 0;
- }
- static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_tex_state_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetTextureState state;
- } *cmd;
- SVGA3dTextureState *last_state = (SVGA3dTextureState *)
- ((unsigned long) header + header->size + sizeof(header));
- SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
- ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
- struct vmw_resource_val_node *ctx_node;
- struct vmw_resource_val_node *res_node;
- int ret;
- cmd = container_of(header, struct vmw_tex_state_cmd,
- header);
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->state.cid,
- &ctx_node);
- if (unlikely(ret != 0))
- return ret;
- for (; cur_state < last_state; ++cur_state) {
- if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
- continue;
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cur_state->value, &res_node);
- if (unlikely(ret != 0))
- return ret;
- if (dev_priv->has_mob) {
- struct vmw_ctx_bindinfo bi;
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_tex;
- bi.i1.texture_stage = cur_state->stage;
- vmw_context_binding_add(ctx_node->staged_bindings,
- &bi);
- }
- }
- return 0;
- }
- static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- void *buf)
- {
- struct vmw_dma_buffer *vmw_bo;
- int ret;
- struct {
- uint32_t header;
- SVGAFifoCmdDefineGMRFB body;
- } *cmd = buf;
- ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->body.ptr,
- &vmw_bo);
- if (unlikely(ret != 0))
- return ret;
- vmw_dmabuf_unreference(&vmw_bo);
- return ret;
- }
- /**
- * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context being used for this batch.
- * @res_type: The resource type.
- * @converter: Information about user-space binding for this resource type.
- * @res_id: Pointer to the user-space resource handle in the command stream.
- * @buf_id: Pointer to the user-space backup buffer handle in the command
- * stream.
- * @backup_offset: Offset of backup into MOB.
- *
- * This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving.
- */
- static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv
- *converter,
- uint32_t *res_id,
- uint32_t *buf_id,
- unsigned long backup_offset)
- {
- int ret;
- struct vmw_dma_buffer *dma_buf;
- struct vmw_resource_val_node *val_node;
- ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
- converter, res_id, &val_node);
- if (unlikely(ret != 0))
- return ret;
- ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
- if (unlikely(ret != 0))
- return ret;
- if (val_node->first_usage)
- val_node->no_buffer_needed = true;
- vmw_dmabuf_unreference(&val_node->new_backup);
- val_node->new_backup = dma_buf;
- val_node->new_backup_offset = backup_offset;
- return 0;
- }
- /**
- * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
- * command
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context being used for this batch.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_bind_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBindGBSurface body;
- } *cmd;
- cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
- return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.sid, &cmd->body.mobid,
- 0);
- }
- /**
- * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
- * command
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context being used for this batch.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdUpdateGBImage body;
- } *cmd;
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
- return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.image.sid, NULL);
- }
- /**
- * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
- * command
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context being used for this batch.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdUpdateGBSurface body;
- } *cmd;
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
- return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.sid, NULL);
- }
- /**
- * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
- * command
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context being used for this batch.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdReadbackGBImage body;
- } *cmd;
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
- return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.image.sid, NULL);
- }
- /**
- * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
- * command
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context being used for this batch.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdReadbackGBSurface body;
- } *cmd;
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
- return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.sid, NULL);
- }
- /**
- * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
- * command
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context being used for this batch.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdInvalidateGBImage body;
- } *cmd;
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
- return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.image.sid, NULL);
- }
- /**
- * vmw_cmd_invalidate_gb_surface - Validate an
- * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context being used for this batch.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdInvalidateGBSurface body;
- } *cmd;
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
- return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.sid, NULL);
- }
- /**
- * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
- * command
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context being used for this batch.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_shader_define_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineShader body;
- } *cmd;
- int ret;
- size_t size;
- struct vmw_resource_val_node *val;
- cmd = container_of(header, struct vmw_shader_define_cmd,
- header);
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &val);
- if (unlikely(ret != 0))
- return ret;
- if (unlikely(!dev_priv->has_mob))
- return 0;
- size = cmd->header.size - sizeof(cmd->body);
- ret = vmw_compat_shader_add(dev_priv,
- vmw_context_res_man(val->res),
- cmd->body.shid, cmd + 1,
- cmd->body.type, size,
- &sw_context->staged_cmd_res);
- if (unlikely(ret != 0))
- return ret;
- return vmw_resource_relocation_add(&sw_context->res_relocations,
- NULL, &cmd->header.id -
- sw_context->buf_start);
- return 0;
- }
- /**
- * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
- * command
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context being used for this batch.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_shader_destroy_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroyShader body;
- } *cmd;
- int ret;
- struct vmw_resource_val_node *val;
- cmd = container_of(header, struct vmw_shader_destroy_cmd,
- header);
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &val);
- if (unlikely(ret != 0))
- return ret;
- if (unlikely(!dev_priv->has_mob))
- return 0;
- ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
- cmd->body.shid,
- cmd->body.type,
- &sw_context->staged_cmd_res);
- if (unlikely(ret != 0))
- return ret;
- return vmw_resource_relocation_add(&sw_context->res_relocations,
- NULL, &cmd->header.id -
- sw_context->buf_start);
- return 0;
- }
- /**
- * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
- * command
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context being used for this batch.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_set_shader_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetShader body;
- } *cmd;
- struct vmw_resource_val_node *ctx_node, *res_node = NULL;
- struct vmw_ctx_bindinfo bi;
- struct vmw_resource *res = NULL;
- int ret;
- cmd = container_of(header, struct vmw_set_shader_cmd,
- header);
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx_node);
- if (unlikely(ret != 0))
- return ret;
- if (!dev_priv->has_mob)
- return 0;
- if (cmd->body.shid != SVGA3D_INVALID_ID) {
- res = vmw_compat_shader_lookup
- (vmw_context_res_man(ctx_node->res),
- cmd->body.shid,
- cmd->body.type);
- if (!IS_ERR(res)) {
- ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
- vmw_res_shader,
- &cmd->body.shid, res,
- &res_node);
- vmw_resource_unreference(&res);
- if (unlikely(ret != 0))
- return ret;
- }
- }
- if (!res_node) {
- ret = vmw_cmd_res_check(dev_priv, sw_context,
- vmw_res_shader,
- user_shader_converter,
- &cmd->body.shid, &res_node);
- if (unlikely(ret != 0))
- return ret;
- }
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_shader;
- bi.i1.shader_type = cmd->body.type;
- return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
- }
- /**
- * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
- * command
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context being used for this batch.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_set_shader_const_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetShaderConst body;
- } *cmd;
- int ret;
- cmd = container_of(header, struct vmw_set_shader_const_cmd,
- header);
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- NULL);
- if (unlikely(ret != 0))
- return ret;
- if (dev_priv->has_mob)
- header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
- return 0;
- }
- /**
- * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
- * command
- *
- * @dev_priv: Pointer to a device private struct.
- * @sw_context: The software context being used for this batch.
- * @header: Pointer to the command header in the command stream.
- */
- static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
- {
- struct vmw_bind_gb_shader_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBindGBShader body;
- } *cmd;
- cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
- header);
- return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
- user_shader_converter,
- &cmd->body.shid, &cmd->body.mobid,
- cmd->body.offsetInBytes);
- }
- static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- void *buf, uint32_t *size)
- {
- uint32_t size_remaining = *size;
- uint32_t cmd_id;
- cmd_id = ((uint32_t *)buf)[0];
- switch (cmd_id) {
- case SVGA_CMD_UPDATE:
- *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
- break;
- case SVGA_CMD_DEFINE_GMRFB:
- *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
- break;
- case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
- *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
- break;
- case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
- *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
- break;
- default:
- DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
- return -EINVAL;
- }
- if (*size > size_remaining) {
- DRM_ERROR("Invalid SVGA command (size mismatch):"
- " %u.\n", cmd_id);
- return -EINVAL;
- }
- if (unlikely(!sw_context->kernel)) {
- DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
- return -EPERM;
- }
- if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
- return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
- return 0;
- }
- static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
- &vmw_cmd_set_render_target_check, true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
- true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
- &vmw_cmd_blt_surf_screen_check, false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
- true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
- true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
- &vmw_cmd_update_gb_surface, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
- &vmw_cmd_readback_gb_image, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
- &vmw_cmd_readback_gb_surface, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
- &vmw_cmd_invalidate_gb_image, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
- &vmw_cmd_invalidate_gb_surface, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
- true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
- true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
- true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
- true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
- true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
- false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
- true, false, true)
- };
- static int vmw_cmd_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- void *buf, uint32_t *size)
- {
- uint32_t cmd_id;
- uint32_t size_remaining = *size;
- SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
- int ret;
- const struct vmw_cmd_entry *entry;
- bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
- cmd_id = ((uint32_t *)buf)[0];
- /* Handle any none 3D commands */
- if (unlikely(cmd_id < SVGA_CMD_MAX))
- return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
- cmd_id = header->id;
- *size = header->size + sizeof(SVGA3dCmdHeader);
- cmd_id -= SVGA_3D_CMD_BASE;
- if (unlikely(*size > size_remaining))
- goto out_invalid;
- if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
- goto out_invalid;
- entry = &vmw_cmd_entries[cmd_id];
- if (unlikely(!entry->func))
- goto out_invalid;
- if (unlikely(!entry->user_allow && !sw_context->kernel))
- goto out_privileged;
- if (unlikely(entry->gb_disable && gb))
- goto out_old;
- if (unlikely(entry->gb_enable && !gb))
- goto out_new;
- ret = entry->func(dev_priv, sw_context, header);
- if (unlikely(ret != 0))
- goto out_invalid;
- return 0;
- out_invalid:
- DRM_ERROR("Invalid SVGA3D command: %d\n",
- cmd_id + SVGA_3D_CMD_BASE);
- return -EINVAL;
- out_privileged:
- DRM_ERROR("Privileged SVGA3D command: %d\n",
- cmd_id + SVGA_3D_CMD_BASE);
- return -EPERM;
- out_old:
- DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
- cmd_id + SVGA_3D_CMD_BASE);
- return -EINVAL;
- out_new:
- DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
- cmd_id + SVGA_3D_CMD_BASE);
- return -EINVAL;
- }
- static int vmw_cmd_check_all(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- void *buf,
- uint32_t size)
- {
- int32_t cur_size = size;
- int ret;
- sw_context->buf_start = buf;
- while (cur_size > 0) {
- size = cur_size;
- ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
- if (unlikely(ret != 0))
- return ret;
- buf = (void *)((unsigned long) buf + size);
- cur_size -= size;
- }
- if (unlikely(cur_size != 0)) {
- DRM_ERROR("Command verifier out of sync.\n");
- return -EINVAL;
- }
- return 0;
- }
- static void vmw_free_relocations(struct vmw_sw_context *sw_context)
- {
- sw_context->cur_reloc = 0;
- }
- static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
- {
- uint32_t i;
- struct vmw_relocation *reloc;
- struct ttm_validate_buffer *validate;
- struct ttm_buffer_object *bo;
- for (i = 0; i < sw_context->cur_reloc; ++i) {
- reloc = &sw_context->relocs[i];
- validate = &sw_context->val_bufs[reloc->index].base;
- bo = validate->bo;
- switch (bo->mem.mem_type) {
- case TTM_PL_VRAM:
- reloc->location->offset += bo->offset;
- reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
- break;
- case VMW_PL_GMR:
- reloc->location->gmrId = bo->mem.start;
- break;
- case VMW_PL_MOB:
- *reloc->mob_loc = bo->mem.start;
- break;
- default:
- BUG();
- }
- }
- vmw_free_relocations(sw_context);
- }
- /**
- * vmw_resource_list_unrefererence - Free up a resource list and unreference
- * all resources referenced by it.
- *
- * @list: The resource list.
- */
- static void vmw_resource_list_unreference(struct list_head *list)
- {
- struct vmw_resource_val_node *val, *val_next;
- /*
- * Drop references to resources held during command submission.
- */
- list_for_each_entry_safe(val, val_next, list, head) {
- list_del_init(&val->head);
- vmw_resource_unreference(&val->res);
- if (unlikely(val->staged_bindings))
- kfree(val->staged_bindings);
- kfree(val);
- }
- }
- static void vmw_clear_validations(struct vmw_sw_context *sw_context)
- {
- struct vmw_validate_buffer *entry, *next;
- struct vmw_resource_val_node *val;
- /*
- * Drop references to DMA buffers held during command submission.
- */
- list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
- base.head) {
- list_del(&entry->base.head);
- ttm_bo_unref(&entry->base.bo);
- (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
- sw_context->cur_val_buf--;
- }
- BUG_ON(sw_context->cur_val_buf != 0);
- list_for_each_entry(val, &sw_context->resource_list, head)
- (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
- }
- int vmw_validate_single_buffer(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo,
- bool interruptible,
- bool validate_as_mob)
- {
- struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
- base);
- int ret;
- if (vbo->pin_count > 0)
- return 0;
- if (validate_as_mob)
- return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
- false);
- /**
- * Put BO in VRAM if there is space, otherwise as a GMR.
- * If there is no space in VRAM and GMR ids are all used up,
- * start evicting GMRs to make room. If the DMA buffer can't be
- * used as a GMR, this will return -ENOMEM.
- */
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
- false);
- if (likely(ret == 0 || ret == -ERESTARTSYS))
- return ret;
- /**
- * If that failed, try VRAM again, this time evicting
- * previous contents.
- */
- ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
- return ret;
- }
- static int vmw_validate_buffers(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context)
- {
- struct vmw_validate_buffer *entry;
- int ret;
- list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
- ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
- true,
- entry->validate_as_mob);
- if (unlikely(ret != 0))
- return ret;
- }
- return 0;
- }
- static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
- uint32_t size)
- {
- if (likely(sw_context->cmd_bounce_size >= size))
- return 0;
- if (sw_context->cmd_bounce_size == 0)
- sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
- while (sw_context->cmd_bounce_size < size) {
- sw_context->cmd_bounce_size =
- PAGE_ALIGN(sw_context->cmd_bounce_size +
- (sw_context->cmd_bounce_size >> 1));
- }
- if (sw_context->cmd_bounce != NULL)
- vfree(sw_context->cmd_bounce);
- sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
- if (sw_context->cmd_bounce == NULL) {
- DRM_ERROR("Failed to allocate command bounce buffer.\n");
- sw_context->cmd_bounce_size = 0;
- return -ENOMEM;
- }
- return 0;
- }
- /**
- * vmw_execbuf_fence_commands - create and submit a command stream fence
- *
- * Creates a fence object and submits a command stream marker.
- * If this fails for some reason, We sync the fifo and return NULL.
- * It is then safe to fence buffers with a NULL pointer.
- *
- * If @p_handle is not NULL @file_priv must also not be NULL. Creates
- * a userspace handle if @p_handle is not NULL, otherwise not.
- */
- int vmw_execbuf_fence_commands(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- struct vmw_fence_obj **p_fence,
- uint32_t *p_handle)
- {
- uint32_t sequence;
- int ret;
- bool synced = false;
- /* p_handle implies file_priv. */
- BUG_ON(p_handle != NULL && file_priv == NULL);
- ret = vmw_fifo_send_fence(dev_priv, &sequence);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Fence submission error. Syncing.\n");
- synced = true;
- }
- if (p_handle != NULL)
- ret = vmw_user_fence_create(file_priv, dev_priv->fman,
- sequence, p_fence, p_handle);
- else
- ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
- if (unlikely(ret != 0 && !synced)) {
- (void) vmw_fallback_wait(dev_priv, false, false,
- sequence, false,
- VMW_FENCE_WAIT_TIMEOUT);
- *p_fence = NULL;
- }
- return 0;
- }
- /**
- * vmw_execbuf_copy_fence_user - copy fence object information to
- * user-space.
- *
- * @dev_priv: Pointer to a vmw_private struct.
- * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
- * @ret: Return value from fence object creation.
- * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
- * which the information should be copied.
- * @fence: Pointer to the fenc object.
- * @fence_handle: User-space fence handle.
- *
- * This function copies fence information to user-space. If copying fails,
- * The user-space struct drm_vmw_fence_rep::error member is hopefully
- * left untouched, and if it's preloaded with an -EFAULT by user-space,
- * the error will hopefully be detected.
- * Also if copying fails, user-space will be unable to signal the fence
- * object so we wait for it immediately, and then unreference the
- * user-space reference.
- */
- void
- vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
- struct vmw_fpriv *vmw_fp,
- int ret,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct vmw_fence_obj *fence,
- uint32_t fence_handle)
- {
- struct drm_vmw_fence_rep fence_rep;
- if (user_fence_rep == NULL)
- return;
- memset(&fence_rep, 0, sizeof(fence_rep));
- fence_rep.error = ret;
- if (ret == 0) {
- BUG_ON(fence == NULL);
- fence_rep.handle = fence_handle;
- fence_rep.seqno = fence->base.seqno;
- vmw_update_seqno(dev_priv, &dev_priv->fifo);
- fence_rep.passed_seqno = dev_priv->last_read_seqno;
- }
- /*
- * copy_to_user errors will be detected by user space not
- * seeing fence_rep::error filled in. Typically
- * user-space would have pre-set that member to -EFAULT.
- */
- ret = copy_to_user(user_fence_rep, &fence_rep,
- sizeof(fence_rep));
- /*
- * User-space lost the fence object. We need to sync
- * and unreference the handle.
- */
- if (unlikely(ret != 0) && (fence_rep.error == 0)) {
- ttm_ref_object_base_unref(vmw_fp->tfile,
- fence_handle, TTM_REF_USAGE);
- DRM_ERROR("Fence copy error. Syncing.\n");
- (void) vmw_fence_obj_wait(fence, false, false,
- VMW_FENCE_WAIT_TIMEOUT);
- }
- }
- /**
- * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
- * the fifo.
- *
- * @dev_priv: Pointer to a device private structure.
- * @kernel_commands: Pointer to the unpatched command batch.
- * @command_size: Size of the unpatched command batch.
- * @sw_context: Structure holding the relocation lists.
- *
- * Side effects: If this function returns 0, then the command batch
- * pointed to by @kernel_commands will have been modified.
- */
- static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
- void *kernel_commands,
- u32 command_size,
- struct vmw_sw_context *sw_context)
- {
- void *cmd = vmw_fifo_reserve(dev_priv, command_size);
- if (!cmd) {
- DRM_ERROR("Failed reserving fifo space for commands.\n");
- return -ENOMEM;
- }
- vmw_apply_relocations(sw_context);
- memcpy(cmd, kernel_commands, command_size);
- vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
- vmw_resource_relocations_free(&sw_context->res_relocations);
- vmw_fifo_commit(dev_priv, command_size);
- return 0;
- }
- /**
- * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
- * the command buffer manager.
- *
- * @dev_priv: Pointer to a device private structure.
- * @header: Opaque handle to the command buffer allocation.
- * @command_size: Size of the unpatched command batch.
- * @sw_context: Structure holding the relocation lists.
- *
- * Side effects: If this function returns 0, then the command buffer
- * represented by @header will have been modified.
- */
- static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
- struct vmw_cmdbuf_header *header,
- u32 command_size,
- struct vmw_sw_context *sw_context)
- {
- void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
- SVGA3D_INVALID_ID, false, header);
- vmw_apply_relocations(sw_context);
- vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
- vmw_resource_relocations_free(&sw_context->res_relocations);
- vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
- return 0;
- }
- /**
- * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
- * submission using a command buffer.
- *
- * @dev_priv: Pointer to a device private structure.
- * @user_commands: User-space pointer to the commands to be submitted.
- * @command_size: Size of the unpatched command batch.
- * @header: Out parameter returning the opaque pointer to the command buffer.
- *
- * This function checks whether we can use the command buffer manager for
- * submission and if so, creates a command buffer of suitable size and
- * copies the user data into that buffer.
- *
- * On successful return, the function returns a pointer to the data in the
- * command buffer and *@header is set to non-NULL.
- * If command buffers could not be used, the function will return the value
- * of @kernel_commands on function call. That value may be NULL. In that case,
- * the value of *@header will be set to NULL.
- * If an error is encountered, the function will return a pointer error value.
- * If the function is interrupted by a signal while sleeping, it will return
- * -ERESTARTSYS casted to a pointer error value.
- */
- static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
- void __user *user_commands,
- void *kernel_commands,
- u32 command_size,
- struct vmw_cmdbuf_header **header)
- {
- size_t cmdbuf_size;
- int ret;
- *header = NULL;
- if (!dev_priv->cman || kernel_commands)
- return kernel_commands;
- if (command_size > SVGA_CB_MAX_SIZE) {
- DRM_ERROR("Command buffer is too large.\n");
- return ERR_PTR(-EINVAL);
- }
- /* If possible, add a little space for fencing. */
- cmdbuf_size = command_size + 512;
- cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
- kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
- true, header);
- if (IS_ERR(kernel_commands))
- return kernel_commands;
- ret = copy_from_user(kernel_commands, user_commands,
- command_size);
- if (ret) {
- DRM_ERROR("Failed copying commands.\n");
- vmw_cmdbuf_header_free(*header);
- *header = NULL;
- return ERR_PTR(-EFAULT);
- }
- return kernel_commands;
- }
- int vmw_execbuf_process(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- void __user *user_commands,
- void *kernel_commands,
- uint32_t command_size,
- uint64_t throttle_us,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct vmw_fence_obj **out_fence)
- {
- struct vmw_sw_context *sw_context = &dev_priv->ctx;
- struct vmw_fence_obj *fence = NULL;
- struct vmw_resource *error_resource;
- struct list_head resource_list;
- struct vmw_cmdbuf_header *header;
- struct ww_acquire_ctx ticket;
- uint32_t handle;
- int ret;
- if (throttle_us) {
- ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
- throttle_us);
-
- if (ret)
- return ret;
- }
-
- kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
- kernel_commands, command_size,
- &header);
- if (IS_ERR(kernel_commands))
- return PTR_ERR(kernel_commands);
- ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
- if (ret) {
- ret = -ERESTARTSYS;
- goto out_free_header;
- }
- sw_context->kernel = false;
- if (kernel_commands == NULL) {
- ret = vmw_resize_cmd_bounce(sw_context, command_size);
- if (unlikely(ret != 0))
- goto out_unlock;
- ret = copy_from_user(sw_context->cmd_bounce,
- user_commands, command_size);
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- DRM_ERROR("Failed copying commands.\n");
- goto out_unlock;
- }
- kernel_commands = sw_context->cmd_bounce;
- } else if (!header)
- sw_context->kernel = true;
- sw_context->fp = vmw_fpriv(file_priv);
- sw_context->cur_reloc = 0;
- sw_context->cur_val_buf = 0;
- INIT_LIST_HEAD(&sw_context->resource_list);
- sw_context->cur_query_bo = dev_priv->pinned_bo;
- sw_context->last_query_ctx = NULL;
- sw_context->needs_post_query_barrier = false;
- memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
- INIT_LIST_HEAD(&sw_context->validate_nodes);
- INIT_LIST_HEAD(&sw_context->res_relocations);
- if (!sw_context->res_ht_initialized) {
- ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
- if (unlikely(ret != 0))
- goto out_unlock;
- sw_context->res_ht_initialized = true;
- }
- INIT_LIST_HEAD(&sw_context->staged_cmd_res);
- INIT_LIST_HEAD(&resource_list);
- ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
- command_size);
- if (unlikely(ret != 0))
- goto out_err_nores;
- ret = vmw_resources_reserve(sw_context);
- if (unlikely(ret != 0))
- goto out_err_nores;
- ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
- true, NULL);
- if (unlikely(ret != 0))
- goto out_err;
- ret = vmw_validate_buffers(dev_priv, sw_context);
- if (unlikely(ret != 0))
- goto out_err;
- ret = vmw_resources_validate(sw_context);
- if (unlikely(ret != 0))
- goto out_err;
- ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
- if (unlikely(ret != 0)) {
- ret = -ERESTARTSYS;
- goto out_err;
- }
- if (dev_priv->has_mob) {
- ret = vmw_rebind_contexts(sw_context);
- if (unlikely(ret != 0))
- goto out_unlock_binding;
- }
- if (!header) {
- ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
- command_size, sw_context);
- } else {
- ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
- sw_context);
- header = NULL;
- }
- if (ret)
- goto out_unlock_binding;
- vmw_query_bo_switch_commit(dev_priv, sw_context);
- ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
- &fence,
- (user_fence_rep) ? &handle : NULL);
- /*
- * This error is harmless, because if fence submission fails,
- * vmw_fifo_send_fence will sync. The error will be propagated to
- * user-space in @fence_rep
- */
- if (ret != 0)
- DRM_ERROR("Fence submission error. Syncing.\n");
- vmw_resource_list_unreserve(&sw_context->resource_list, false);
- mutex_unlock(&dev_priv->binding_mutex);
- ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
- (void *) fence);
- if (unlikely(dev_priv->pinned_bo != NULL &&
- !dev_priv->query_cid_valid))
- __vmw_execbuf_release_pinned_bo(dev_priv, fence);
- vmw_clear_validations(sw_context);
- vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
- user_fence_rep, fence, handle);
- /* Don't unreference when handing fence out */
- if (unlikely(out_fence != NULL)) {
- *out_fence = fence;
- fence = NULL;
- } else if (likely(fence != NULL)) {
- vmw_fence_obj_unreference(&fence);
- }
- list_splice_init(&sw_context->resource_list, &resource_list);
- vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
- mutex_unlock(&dev_priv->cmdbuf_mutex);
- /*
- * Unreference resources outside of the cmdbuf_mutex to
- * avoid deadlocks in resource destruction paths.
- */
- vmw_resource_list_unreference(&resource_list);
- return 0;
- out_unlock_binding:
- mutex_unlock(&dev_priv->binding_mutex);
- out_err:
- ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
- out_err_nores:
- vmw_resource_list_unreserve(&sw_context->resource_list, true);
- vmw_resource_relocations_free(&sw_context->res_relocations);
- vmw_free_relocations(sw_context);
- vmw_clear_validations(sw_context);
- if (unlikely(dev_priv->pinned_bo != NULL &&
- !dev_priv->query_cid_valid))
- __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
- out_unlock:
- list_splice_init(&sw_context->resource_list, &resource_list);
- error_resource = sw_context->error_resource;
- sw_context->error_resource = NULL;
- vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
- mutex_unlock(&dev_priv->cmdbuf_mutex);
- /*
- * Unreference resources outside of the cmdbuf_mutex to
- * avoid deadlocks in resource destruction paths.
- */
- vmw_resource_list_unreference(&resource_list);
- if (unlikely(error_resource != NULL))
- vmw_resource_unreference(&error_resource);
- out_free_header:
- if (header)
- vmw_cmdbuf_header_free(header);
- return ret;
- }
- /**
- * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
- *
- * @dev_priv: The device private structure.
- *
- * This function is called to idle the fifo and unpin the query buffer
- * if the normal way to do this hits an error, which should typically be
- * extremely rare.
- */
- static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
- {
- DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
- (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
- vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
- if (dev_priv->dummy_query_bo_pinned) {
- vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
- dev_priv->dummy_query_bo_pinned = false;
- }
- }
- /**
- * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
- * query bo.
- *
- * @dev_priv: The device private structure.
- * @fence: If non-NULL should point to a struct vmw_fence_obj issued
- * _after_ a query barrier that flushes all queries touching the current
- * buffer pointed to by @dev_priv->pinned_bo
- *
- * This function should be used to unpin the pinned query bo, or
- * as a query barrier when we need to make sure that all queries have
- * finished before the next fifo command. (For example on hardware
- * context destructions where the hardware may otherwise leak unfinished
- * queries).
- *
- * This function does not return any failure codes, but make attempts
- * to do safe unpinning in case of errors.
- *
- * The function will synchronize on the previous query barrier, and will
- * thus not finish until that barrier has executed.
- *
- * the @dev_priv->cmdbuf_mutex needs to be held by the current thread
- * before calling this function.
- */
- void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
- struct vmw_fence_obj *fence)
- {
- int ret = 0;
- struct list_head validate_list;
- struct ttm_validate_buffer pinned_val, query_val;
- struct vmw_fence_obj *lfence = NULL;
- struct ww_acquire_ctx ticket;
- if (dev_priv->pinned_bo == NULL)
- goto out_unlock;
- INIT_LIST_HEAD(&validate_list);
- pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
- pinned_val.shared = false;
- list_add_tail(&pinned_val.head, &validate_list);
- query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
- query_val.shared = false;
- list_add_tail(&query_val.head, &validate_list);
- ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
- false, NULL);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
- goto out_no_reserve;
- }
- if (dev_priv->query_cid_valid) {
- BUG_ON(fence != NULL);
- ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
- goto out_no_emit;
- }
- dev_priv->query_cid_valid = false;
- }
- vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
- if (dev_priv->dummy_query_bo_pinned) {
- vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
- dev_priv->dummy_query_bo_pinned = false;
- }
- if (fence == NULL) {
- (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
- NULL);
- fence = lfence;
- }
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
- if (lfence != NULL)
- vmw_fence_obj_unreference(&lfence);
- ttm_bo_unref(&query_val.bo);
- ttm_bo_unref(&pinned_val.bo);
- vmw_dmabuf_unreference(&dev_priv->pinned_bo);
- DRM_INFO("Dummy query bo pin count: %d\n",
- dev_priv->dummy_query_bo->pin_count);
- out_unlock:
- return;
- out_no_emit:
- ttm_eu_backoff_reservation(&ticket, &validate_list);
- out_no_reserve:
- ttm_bo_unref(&query_val.bo);
- ttm_bo_unref(&pinned_val.bo);
- vmw_dmabuf_unreference(&dev_priv->pinned_bo);
- }
- /**
- * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
- * query bo.
- *
- * @dev_priv: The device private structure.
- *
- * This function should be used to unpin the pinned query bo, or
- * as a query barrier when we need to make sure that all queries have
- * finished before the next fifo command. (For example on hardware
- * context destructions where the hardware may otherwise leak unfinished
- * queries).
- *
- * This function does not return any failure codes, but make attempts
- * to do safe unpinning in case of errors.
- *
- * The function will synchronize on the previous query barrier, and will
- * thus not finish until that barrier has executed.
- */
- void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
- {
- mutex_lock(&dev_priv->cmdbuf_mutex);
- if (dev_priv->query_cid_valid)
- __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
- mutex_unlock(&dev_priv->cmdbuf_mutex);
- }
- int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
- {
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
- int ret;
- /*
- * This will allow us to extend the ioctl argument while
- * maintaining backwards compatibility:
- * We take different code paths depending on the value of
- * arg->version.
- */
- if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
- DRM_ERROR("Incorrect execbuf version.\n");
- DRM_ERROR("You're running outdated experimental "
- "vmwgfx user-space drivers.");
- return -EINVAL;
- }
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
- ret = vmw_execbuf_process(file_priv, dev_priv,
- (void __user *)(unsigned long)arg->commands,
- NULL, arg->command_size, arg->throttle_us,
- (void __user *)(unsigned long)arg->fence_rep,
- NULL);
- ttm_read_unlock(&dev_priv->reservation_sem);
- if (unlikely(ret != 0))
- return ret;
- vmw_kms_cursor_post_execbuf(dev_priv);
- return 0;
- }
|