emulate.c

  1. /******************************************************************************
  2. * emulate.c
  3. *
  4. * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
  5. *
  6. * Copyright (c) 2005 Keir Fraser
  7. *
  8. * Linux coding style, mod r/m decoder, segment base fixes, real-mode
  9. * privileged instructions:
  10. *
  11. * Copyright (C) 2006 Qumranet
  12. * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  13. *
  14. * Avi Kivity <avi@qumranet.com>
  15. * Yaniv Kamay <yaniv@qumranet.com>
  16. *
  17. * This work is licensed under the terms of the GNU GPL, version 2. See
  18. * the COPYING file in the top-level directory.
  19. *
  20. * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
  21. */
  22. #include <linux/kvm_host.h>
  23. #include "kvm_cache_regs.h"
  24. #include <asm/kvm_emulate.h>
  25. #include <linux/stringify.h>
  26. #include <asm/debugreg.h>
  27. #include "x86.h"
  28. #include "tss.h"
  29. /*
  30. * Operand types
  31. */
  32. #define OpNone 0ull
  33. #define OpImplicit 1ull /* No generic decode */
  34. #define OpReg 2ull /* Register */
  35. #define OpMem 3ull /* Memory */
  36. #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
  37. #define OpDI 5ull /* ES:DI/EDI/RDI */
  38. #define OpMem64 6ull /* Memory, 64-bit */
  39. #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
  40. #define OpDX 8ull /* DX register */
  41. #define OpCL 9ull /* CL register (for shifts) */
  42. #define OpImmByte 10ull /* 8-bit sign extended immediate */
  43. #define OpOne 11ull /* Implied 1 */
  44. #define OpImm 12ull /* Sign extended up to 32-bit immediate */
  45. #define OpMem16 13ull /* Memory operand (16-bit). */
  46. #define OpMem32 14ull /* Memory operand (32-bit). */
  47. #define OpImmU 15ull /* Immediate operand, zero extended */
  48. #define OpSI 16ull /* SI/ESI/RSI */
  49. #define OpImmFAddr 17ull /* Immediate far address */
  50. #define OpMemFAddr 18ull /* Far address in memory */
  51. #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
  52. #define OpES 20ull /* ES */
  53. #define OpCS 21ull /* CS */
  54. #define OpSS 22ull /* SS */
  55. #define OpDS 23ull /* DS */
  56. #define OpFS 24ull /* FS */
  57. #define OpGS 25ull /* GS */
  58. #define OpMem8 26ull /* 8-bit zero extended memory operand */
  59. #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
  60. #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
  61. #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
  62. #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
  63. #define OpBits 5 /* Width of operand field */
  64. #define OpMask ((1ull << OpBits) - 1)
  65. /*
  66. * Opcode effective-address decode tables.
  67. * Note that we only emulate instructions that have at least one memory
  68. * operand (excluding implicit stack references). We assume that stack
  69. * references and instruction fetches will never occur in special memory
  70. * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
  71. * not be handled.
  72. */
  73. /* Operand sizes: 8-bit operands or specified/overridden size. */
  74. #define ByteOp (1<<0) /* 8-bit operands. */
  75. /* Destination operand type. */
  76. #define DstShift 1
  77. #define ImplicitOps (OpImplicit << DstShift)
  78. #define DstReg (OpReg << DstShift)
  79. #define DstMem (OpMem << DstShift)
  80. #define DstAcc (OpAcc << DstShift)
  81. #define DstDI (OpDI << DstShift)
  82. #define DstMem64 (OpMem64 << DstShift)
  83. #define DstMem16 (OpMem16 << DstShift)
  84. #define DstImmUByte (OpImmUByte << DstShift)
  85. #define DstDX (OpDX << DstShift)
  86. #define DstAccLo (OpAccLo << DstShift)
  87. #define DstMask (OpMask << DstShift)
  88. /* Source operand type. */
  89. #define SrcShift 6
  90. #define SrcNone (OpNone << SrcShift)
  91. #define SrcReg (OpReg << SrcShift)
  92. #define SrcMem (OpMem << SrcShift)
  93. #define SrcMem16 (OpMem16 << SrcShift)
  94. #define SrcMem32 (OpMem32 << SrcShift)
  95. #define SrcImm (OpImm << SrcShift)
  96. #define SrcImmByte (OpImmByte << SrcShift)
  97. #define SrcOne (OpOne << SrcShift)
  98. #define SrcImmUByte (OpImmUByte << SrcShift)
  99. #define SrcImmU (OpImmU << SrcShift)
  100. #define SrcSI (OpSI << SrcShift)
  101. #define SrcXLat (OpXLat << SrcShift)
  102. #define SrcImmFAddr (OpImmFAddr << SrcShift)
  103. #define SrcMemFAddr (OpMemFAddr << SrcShift)
  104. #define SrcAcc (OpAcc << SrcShift)
  105. #define SrcImmU16 (OpImmU16 << SrcShift)
  106. #define SrcImm64 (OpImm64 << SrcShift)
  107. #define SrcDX (OpDX << SrcShift)
  108. #define SrcMem8 (OpMem8 << SrcShift)
  109. #define SrcAccHi (OpAccHi << SrcShift)
  110. #define SrcMask (OpMask << SrcShift)
  111. #define BitOp (1<<11)
  112. #define MemAbs (1<<12) /* Memory operand is absolute displacement */
  113. #define String (1<<13) /* String instruction (rep capable) */
  114. #define Stack (1<<14) /* Stack instruction (push/pop) */
  115. #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
  116. #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
  117. #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
  118. #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
  119. #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
  120. #define Escape (5<<15) /* Escape to coprocessor instruction */
  121. #define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
  122. #define ModeDual (7<<15) /* Different instruction for 32/64 bit */
  123. #define Sse (1<<18) /* SSE Vector instruction */
  124. /* Generic ModRM decode. */
  125. #define ModRM (1<<19)
  126. /* Destination is only written; never read. */
  127. #define Mov (1<<20)
  128. /* Misc flags */
  129. #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
  130. #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
  131. #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
  132. #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
  133. #define Undefined (1<<25) /* No Such Instruction */
  134. #define Lock (1<<26) /* lock prefix is allowed for the instruction */
  135. #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
  136. #define No64 (1<<28)
  137. #define PageTable (1 << 29) /* instruction used to write page table */
  138. #define NotImpl (1 << 30) /* instruction is not implemented */
  139. /* Source 2 operand type */
  140. #define Src2Shift (31)
  141. #define Src2None (OpNone << Src2Shift)
  142. #define Src2Mem (OpMem << Src2Shift)
  143. #define Src2CL (OpCL << Src2Shift)
  144. #define Src2ImmByte (OpImmByte << Src2Shift)
  145. #define Src2One (OpOne << Src2Shift)
  146. #define Src2Imm (OpImm << Src2Shift)
  147. #define Src2ES (OpES << Src2Shift)
  148. #define Src2CS (OpCS << Src2Shift)
  149. #define Src2SS (OpSS << Src2Shift)
  150. #define Src2DS (OpDS << Src2Shift)
  151. #define Src2FS (OpFS << Src2Shift)
  152. #define Src2GS (OpGS << Src2Shift)
  153. #define Src2Mask (OpMask << Src2Shift)
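/*
 * Illustrative note (not part of the original source): every operand-type
 * field is packed into the opcode flags word at a fixed shift, so the
 * decoder can recover each one with a shift and OpMask, e.g.
 *
 *	unsigned dst_type  = (ctxt->d >> DstShift)  & OpMask;
 *	unsigned src_type  = (ctxt->d >> SrcShift)  & OpMask;
 *	unsigned src2_type = (ctxt->d >> Src2Shift) & OpMask;
 *
 * A table entry such as DstReg | SrcMem | ModRM is simply the OR of these
 * shifted fields with the other flag bits.
 */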
  154. #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
  155. #define AlignMask ((u64)7 << 41)
  156. #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
  157. #define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
  158. #define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
  159. #define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
  160. #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
  161. #define NoWrite ((u64)1 << 45) /* No writeback */
  162. #define SrcWrite ((u64)1 << 46) /* Write back src operand */
  163. #define NoMod ((u64)1 << 47) /* Mod field is ignored */
  164. #define Intercept ((u64)1 << 48) /* Has valid intercept field */
  165. #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
  166. #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
  167. #define NearBranch ((u64)1 << 52) /* Near branches */
  168. #define No16 ((u64)1 << 53) /* No 16 bit operand */
  169. #define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
  170. #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
  171. #define X2(x...) x, x
  172. #define X3(x...) X2(x), x
  173. #define X4(x...) X2(x), X2(x)
  174. #define X5(x...) X4(x), x
  175. #define X6(x...) X4(x), X2(x)
  176. #define X7(x...) X4(x), X3(x)
  177. #define X8(x...) X4(x), X4(x)
  178. #define X16(x...) X8(x), X8(x)
  179. #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
  180. #define FASTOP_SIZE 8
  181. /*
  182. * fastop functions have a special calling convention:
  183. *
  184. * dst: rax (in/out)
  185. * src: rdx (in/out)
  186. * src2: rcx (in)
  187. * flags: rflags (in/out)
  188. * ex: rsi (in:fastop pointer, out:zero if exception)
  189. *
  190. * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
  191. * different operand sizes can be reached by calculation, rather than a jump
  192. * table (which would be bigger than the code).
  193. *
  194. * fastop functions are declared as taking a never-defined fastop parameter,
  195. * so they can't be called from C directly.
  196. */
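/*
 * Illustrative sketch (an assumption, not the in-tree dispatch code): since
 * the FASTOP* macros emit the byte/word/long/quad variants back to back,
 * each one aligned to FASTOP_SIZE bytes, the variant for the current operand
 * size can be located by arithmetic on the em_<op> symbol instead of a jump
 * table, e.g.
 *
 *	void (*entry)(struct fastop *) = (void *)((unsigned long)em_add +
 *				__ffs(ctxt->dst.bytes) * FASTOP_SIZE);
 *
 * with __ffs(1/2/4/8) == 0/1/2/3 selecting the b/w/l/q stub respectively.
 */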
  197. struct fastop;
  198. struct opcode {
  199. u64 flags : 56;
  200. u64 intercept : 8;
  201. union {
  202. int (*execute)(struct x86_emulate_ctxt *ctxt);
  203. const struct opcode *group;
  204. const struct group_dual *gdual;
  205. const struct gprefix *gprefix;
  206. const struct escape *esc;
  207. const struct instr_dual *idual;
  208. const struct mode_dual *mdual;
  209. void (*fastop)(struct fastop *fake);
  210. } u;
  211. int (*check_perm)(struct x86_emulate_ctxt *ctxt);
  212. };
  213. struct group_dual {
  214. struct opcode mod012[8];
  215. struct opcode mod3[8];
  216. };
  217. struct gprefix {
  218. struct opcode pfx_no;
  219. struct opcode pfx_66;
  220. struct opcode pfx_f2;
  221. struct opcode pfx_f3;
  222. };
  223. struct escape {
  224. struct opcode op[8];
  225. struct opcode high[64];
  226. };
  227. struct instr_dual {
  228. struct opcode mod012;
  229. struct opcode mod3;
  230. };
  231. struct mode_dual {
  232. struct opcode mode32;
  233. struct opcode mode64;
  234. };
  235. #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
  236. enum x86_transfer_type {
  237. X86_TRANSFER_NONE,
  238. X86_TRANSFER_CALL_JMP,
  239. X86_TRANSFER_RET,
  240. X86_TRANSFER_TASK_SWITCH,
  241. };
  242. static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
  243. {
  244. if (!(ctxt->regs_valid & (1 << nr))) {
  245. ctxt->regs_valid |= 1 << nr;
  246. ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
  247. }
  248. return ctxt->_regs[nr];
  249. }
  250. static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
  251. {
  252. ctxt->regs_valid |= 1 << nr;
  253. ctxt->regs_dirty |= 1 << nr;
  254. return &ctxt->_regs[nr];
  255. }
  256. static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
  257. {
  258. reg_read(ctxt, nr);
  259. return reg_write(ctxt, nr);
  260. }
  261. static void writeback_registers(struct x86_emulate_ctxt *ctxt)
  262. {
  263. unsigned reg;
  264. for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
  265. ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
  266. }
  267. static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
  268. {
  269. ctxt->regs_dirty = 0;
  270. ctxt->regs_valid = 0;
  271. }
  272. /*
  273. * These EFLAGS bits are restored from saved value during emulation, and
  274. * any changes are written back to the saved value after emulation.
  275. */
  276. #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
  277. X86_EFLAGS_PF|X86_EFLAGS_CF)
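/*
 * Illustrative example (an assumption about the write-back step): after an
 * emulated flag-producing instruction the guest view would be merged as
 *
 *	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (new_flags & EFLAGS_MASK);
 *
 * i.e. only the six arithmetic status bits above are ever rewritten; all
 * other EFLAGS bits keep their saved values.
 */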
  278. #ifdef CONFIG_X86_64
  279. #define ON64(x) x
  280. #else
  281. #define ON64(x)
  282. #endif
  283. static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
  284. #define FOP_FUNC(name) \
  285. ".align " __stringify(FASTOP_SIZE) " \n\t" \
  286. ".type " name ", @function \n\t" \
  287. name ":\n\t"
  288. #define FOP_RET "ret \n\t"
  289. #define FOP_START(op) \
  290. extern void em_##op(struct fastop *fake); \
  291. asm(".pushsection .text, \"ax\" \n\t" \
  292. ".global em_" #op " \n\t" \
  293. FOP_FUNC("em_" #op)
  294. #define FOP_END \
  295. ".popsection")
  296. #define FOPNOP() \
  297. FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
  298. FOP_RET
  299. #define FOP1E(op, dst) \
  300. FOP_FUNC(#op "_" #dst) \
  301. "10: " #op " %" #dst " \n\t" FOP_RET
  302. #define FOP1EEX(op, dst) \
  303. FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
  304. #define FASTOP1(op) \
  305. FOP_START(op) \
  306. FOP1E(op##b, al) \
  307. FOP1E(op##w, ax) \
  308. FOP1E(op##l, eax) \
  309. ON64(FOP1E(op##q, rax)) \
  310. FOP_END
  311. /* 1-operand, using src2 (for MUL/DIV r/m) */
  312. #define FASTOP1SRC2(op, name) \
  313. FOP_START(name) \
  314. FOP1E(op, cl) \
  315. FOP1E(op, cx) \
  316. FOP1E(op, ecx) \
  317. ON64(FOP1E(op, rcx)) \
  318. FOP_END
  319. /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
  320. #define FASTOP1SRC2EX(op, name) \
  321. FOP_START(name) \
  322. FOP1EEX(op, cl) \
  323. FOP1EEX(op, cx) \
  324. FOP1EEX(op, ecx) \
  325. ON64(FOP1EEX(op, rcx)) \
  326. FOP_END
  327. #define FOP2E(op, dst, src) \
  328. FOP_FUNC(#op "_" #dst "_" #src) \
  329. #op " %" #src ", %" #dst " \n\t" FOP_RET
  330. #define FASTOP2(op) \
  331. FOP_START(op) \
  332. FOP2E(op##b, al, dl) \
  333. FOP2E(op##w, ax, dx) \
  334. FOP2E(op##l, eax, edx) \
  335. ON64(FOP2E(op##q, rax, rdx)) \
  336. FOP_END
  337. /* 2 operand, word only */
  338. #define FASTOP2W(op) \
  339. FOP_START(op) \
  340. FOPNOP() \
  341. FOP2E(op##w, ax, dx) \
  342. FOP2E(op##l, eax, edx) \
  343. ON64(FOP2E(op##q, rax, rdx)) \
  344. FOP_END
  345. /* 2 operand, src is CL */
  346. #define FASTOP2CL(op) \
  347. FOP_START(op) \
  348. FOP2E(op##b, al, cl) \
  349. FOP2E(op##w, ax, cl) \
  350. FOP2E(op##l, eax, cl) \
  351. ON64(FOP2E(op##q, rax, cl)) \
  352. FOP_END
  353. /* 2 operand, src and dest are reversed */
  354. #define FASTOP2R(op, name) \
  355. FOP_START(name) \
  356. FOP2E(op##b, dl, al) \
  357. FOP2E(op##w, dx, ax) \
  358. FOP2E(op##l, edx, eax) \
  359. ON64(FOP2E(op##q, rdx, rax)) \
  360. FOP_END
  361. #define FOP3E(op, dst, src, src2) \
  362. FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
  363. #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
  364. /* 3-operand, word-only, src2=cl */
  365. #define FASTOP3WCL(op) \
  366. FOP_START(op) \
  367. FOPNOP() \
  368. FOP3E(op##w, ax, dx, cl) \
  369. FOP3E(op##l, eax, edx, cl) \
  370. ON64(FOP3E(op##q, rax, rdx, cl)) \
  371. FOP_END
  372. /* Special case for SETcc - 1 instruction per cc */
  373. #define FOP_SETCC(op) \
  374. ".align 4 \n\t" \
  375. ".type " #op ", @function \n\t" \
  376. #op ": \n\t" \
  377. #op " %al \n\t" \
  378. FOP_RET
  379. asm(".global kvm_fastop_exception \n"
  380. "kvm_fastop_exception: xor %esi, %esi; ret");
  381. FOP_START(setcc)
  382. FOP_SETCC(seto)
  383. FOP_SETCC(setno)
  384. FOP_SETCC(setc)
  385. FOP_SETCC(setnc)
  386. FOP_SETCC(setz)
  387. FOP_SETCC(setnz)
  388. FOP_SETCC(setbe)
  389. FOP_SETCC(setnbe)
  390. FOP_SETCC(sets)
  391. FOP_SETCC(setns)
  392. FOP_SETCC(setp)
  393. FOP_SETCC(setnp)
  394. FOP_SETCC(setl)
  395. FOP_SETCC(setnl)
  396. FOP_SETCC(setle)
  397. FOP_SETCC(setnle)
  398. FOP_END;
  399. FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
  400. FOP_END;
  401. /*
  402. * XXX: inoutclob user must know where the argument is being expanded.
  403. * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
  404. */
  405. #define asm_safe(insn, inoutclob...) \
  406. ({ \
  407. int _fault = 0; \
  408. \
  409. asm volatile("1:" insn "\n" \
  410. "2:\n" \
  411. ".pushsection .fixup, \"ax\"\n" \
  412. "3: movl $1, %[_fault]\n" \
  413. " jmp 2b\n" \
  414. ".popsection\n" \
  415. _ASM_EXTABLE(1b, 3b) \
  416. : [_fault] "+qm"(_fault) inoutclob ); \
  417. \
  418. _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
  419. })
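/*
 * Example use (illustrative): running a possibly-faulting instruction on
 * behalf of the guest and converting a host-side fault into an emulator
 * error code:
 *
 *	rc = asm_safe("fwait");
 *	if (rc != X86EMUL_CONTINUE)
 *		return rc;
 */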
  420. static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
  421. enum x86_intercept intercept,
  422. enum x86_intercept_stage stage)
  423. {
  424. struct x86_instruction_info info = {
  425. .intercept = intercept,
  426. .rep_prefix = ctxt->rep_prefix,
  427. .modrm_mod = ctxt->modrm_mod,
  428. .modrm_reg = ctxt->modrm_reg,
  429. .modrm_rm = ctxt->modrm_rm,
  430. .src_val = ctxt->src.val64,
  431. .dst_val = ctxt->dst.val64,
  432. .src_bytes = ctxt->src.bytes,
  433. .dst_bytes = ctxt->dst.bytes,
  434. .ad_bytes = ctxt->ad_bytes,
  435. .next_rip = ctxt->eip,
  436. };
  437. return ctxt->ops->intercept(ctxt, &info, stage);
  438. }
  439. static void assign_masked(ulong *dest, ulong src, ulong mask)
  440. {
  441. *dest = (*dest & ~mask) | (src & mask);
  442. }
  443. static void assign_register(unsigned long *reg, u64 val, int bytes)
  444. {
  445. /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
  446. switch (bytes) {
  447. case 1:
  448. *(u8 *)reg = (u8)val;
  449. break;
  450. case 2:
  451. *(u16 *)reg = (u16)val;
  452. break;
  453. case 4:
  454. *reg = (u32)val;
  455. break; /* 64b: zero-extend */
  456. case 8:
  457. *reg = val;
  458. break;
  459. }
  460. }
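/*
 * Worked example (illustrative): assign_register(reg, 0x11223344aabbccdd, 4)
 * stores 0xaabbccdd and clears bits 63:32 of *reg, mirroring how a 32-bit
 * destination is zero-extended in 64-bit mode, while the 1- and 2-byte cases
 * leave the upper bytes of the register untouched.
 */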
  461. static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
  462. {
  463. return (1UL << (ctxt->ad_bytes << 3)) - 1;
  464. }
  465. static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
  466. {
  467. u16 sel;
  468. struct desc_struct ss;
  469. if (ctxt->mode == X86EMUL_MODE_PROT64)
  470. return ~0UL;
  471. ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
  472. return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
  473. }
  474. static int stack_size(struct x86_emulate_ctxt *ctxt)
  475. {
  476. return (__fls(stack_mask(ctxt)) + 1) >> 3;
  477. }
  478. /* Access/update address held in a register, based on addressing mode. */
  479. static inline unsigned long
  480. address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
  481. {
  482. if (ctxt->ad_bytes == sizeof(unsigned long))
  483. return reg;
  484. else
  485. return reg & ad_mask(ctxt);
  486. }
  487. static inline unsigned long
  488. register_address(struct x86_emulate_ctxt *ctxt, int reg)
  489. {
  490. return address_mask(ctxt, reg_read(ctxt, reg));
  491. }
  492. static void masked_increment(ulong *reg, ulong mask, int inc)
  493. {
  494. assign_masked(reg, *reg + inc, mask);
  495. }
  496. static inline void
  497. register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
  498. {
  499. ulong *preg = reg_rmw(ctxt, reg);
  500. assign_register(preg, *preg + inc, ctxt->ad_bytes);
  501. }
  502. static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
  503. {
  504. masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
  505. }
  506. static u32 desc_limit_scaled(struct desc_struct *desc)
  507. {
  508. u32 limit = get_desc_limit(desc);
  509. return desc->g ? (limit << 12) | 0xfff : limit;
  510. }
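/*
 * Worked example (illustrative): a descriptor with limit 0xfffff and G=1
 * scales to (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a 4 GiB segment,
 * while G=0 leaves the byte-granular limit unchanged.
 */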
  511. static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
  512. {
  513. if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
  514. return 0;
  515. return ctxt->ops->get_cached_segment_base(ctxt, seg);
  516. }
  517. static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
  518. u32 error, bool valid)
  519. {
  520. WARN_ON(vec > 0x1f);
  521. ctxt->exception.vector = vec;
  522. ctxt->exception.error_code = error;
  523. ctxt->exception.error_code_valid = valid;
  524. return X86EMUL_PROPAGATE_FAULT;
  525. }
  526. static int emulate_db(struct x86_emulate_ctxt *ctxt)
  527. {
  528. return emulate_exception(ctxt, DB_VECTOR, 0, false);
  529. }
  530. static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
  531. {
  532. return emulate_exception(ctxt, GP_VECTOR, err, true);
  533. }
  534. static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
  535. {
  536. return emulate_exception(ctxt, SS_VECTOR, err, true);
  537. }
  538. static int emulate_ud(struct x86_emulate_ctxt *ctxt)
  539. {
  540. return emulate_exception(ctxt, UD_VECTOR, 0, false);
  541. }
  542. static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
  543. {
  544. return emulate_exception(ctxt, TS_VECTOR, err, true);
  545. }
  546. static int emulate_de(struct x86_emulate_ctxt *ctxt)
  547. {
  548. return emulate_exception(ctxt, DE_VECTOR, 0, false);
  549. }
  550. static int emulate_nm(struct x86_emulate_ctxt *ctxt)
  551. {
  552. return emulate_exception(ctxt, NM_VECTOR, 0, false);
  553. }
  554. static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
  555. {
  556. u16 selector;
  557. struct desc_struct desc;
  558. ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
  559. return selector;
  560. }
  561. static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
  562. unsigned seg)
  563. {
  564. u16 dummy;
  565. u32 base3;
  566. struct desc_struct desc;
  567. ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
  568. ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
  569. }
  570. /*
  571. * x86 defines three classes of vector instructions: explicitly
  572. * aligned, explicitly unaligned, and the rest, which change behaviour
  573. * depending on whether they're AVX encoded or not.
  574. *
  575. * Also included is CMPXCHG16B which is not a vector instruction, yet it is
  576. * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
  577. * 512 bytes of data must be aligned to a 16 byte boundary.
  578. */
  579. static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
  580. {
  581. u64 alignment = ctxt->d & AlignMask;
  582. if (likely(size < 16))
  583. return 1;
  584. switch (alignment) {
  585. case Unaligned:
  586. case Avx:
  587. return 1;
  588. case Aligned16:
  589. return 16;
  590. case Aligned:
  591. default:
  592. return size;
  593. }
  594. }
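/*
 * Worked example (illustrative): a 16-byte MOVDQA access (Aligned) must sit
 * on a 16-byte boundary (alignment == size), MOVDQU or any AVX-encoded form
 * may be arbitrarily aligned (returns 1), and FXSAVE's 512-byte image only
 * needs 16-byte alignment (Aligned16).
 */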
  595. static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
  596. struct segmented_address addr,
  597. unsigned *max_size, unsigned size,
  598. bool write, bool fetch,
  599. enum x86emul_mode mode, ulong *linear)
  600. {
  601. struct desc_struct desc;
  602. bool usable;
  603. ulong la;
  604. u32 lim;
  605. u16 sel;
  606. la = seg_base(ctxt, addr.seg) + addr.ea;
  607. *max_size = 0;
  608. switch (mode) {
  609. case X86EMUL_MODE_PROT64:
  610. *linear = la;
  611. if (is_noncanonical_address(la))
  612. goto bad;
  613. *max_size = min_t(u64, ~0u, (1ull << 48) - la);
  614. if (size > *max_size)
  615. goto bad;
  616. break;
  617. default:
  618. *linear = la = (u32)la;
  619. usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
  620. addr.seg);
  621. if (!usable)
  622. goto bad;
  623. /* code segment in protected mode or read-only data segment */
  624. if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
  625. || !(desc.type & 2)) && write)
  626. goto bad;
  627. /* unreadable code segment */
  628. if (!fetch && (desc.type & 8) && !(desc.type & 2))
  629. goto bad;
  630. lim = desc_limit_scaled(&desc);
  631. if (!(desc.type & 8) && (desc.type & 4)) {
  632. /* expand-down segment */
  633. if (addr.ea <= lim)
  634. goto bad;
  635. lim = desc.d ? 0xffffffff : 0xffff;
  636. }
  637. if (addr.ea > lim)
  638. goto bad;
  639. if (lim == 0xffffffff)
  640. *max_size = ~0u;
  641. else {
  642. *max_size = (u64)lim + 1 - addr.ea;
  643. if (size > *max_size)
  644. goto bad;
  645. }
  646. break;
  647. }
  648. if (la & (insn_alignment(ctxt, size) - 1))
  649. return emulate_gp(ctxt, 0);
  650. return X86EMUL_CONTINUE;
  651. bad:
  652. if (addr.seg == VCPU_SREG_SS)
  653. return emulate_ss(ctxt, 0);
  654. else
  655. return emulate_gp(ctxt, 0);
  656. }
  657. static int linearize(struct x86_emulate_ctxt *ctxt,
  658. struct segmented_address addr,
  659. unsigned size, bool write,
  660. ulong *linear)
  661. {
  662. unsigned max_size;
  663. return __linearize(ctxt, addr, &max_size, size, write, false,
  664. ctxt->mode, linear);
  665. }
  666. static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
  667. enum x86emul_mode mode)
  668. {
  669. ulong linear;
  670. int rc;
  671. unsigned max_size;
  672. struct segmented_address addr = { .seg = VCPU_SREG_CS,
  673. .ea = dst };
  674. if (ctxt->op_bytes != sizeof(unsigned long))
  675. addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
  676. rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
  677. if (rc == X86EMUL_CONTINUE)
  678. ctxt->_eip = addr.ea;
  679. return rc;
  680. }
  681. static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
  682. {
  683. return assign_eip(ctxt, dst, ctxt->mode);
  684. }
  685. static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
  686. const struct desc_struct *cs_desc)
  687. {
  688. enum x86emul_mode mode = ctxt->mode;
  689. int rc;
  690. #ifdef CONFIG_X86_64
  691. if (ctxt->mode >= X86EMUL_MODE_PROT16) {
  692. if (cs_desc->l) {
  693. u64 efer = 0;
  694. ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
  695. if (efer & EFER_LMA)
  696. mode = X86EMUL_MODE_PROT64;
  697. } else
  698. mode = X86EMUL_MODE_PROT32; /* temporary value */
  699. }
  700. #endif
  701. if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
  702. mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
  703. rc = assign_eip(ctxt, dst, mode);
  704. if (rc == X86EMUL_CONTINUE)
  705. ctxt->mode = mode;
  706. return rc;
  707. }
  708. static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
  709. {
  710. return assign_eip_near(ctxt, ctxt->_eip + rel);
  711. }
  712. static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
  713. struct segmented_address addr,
  714. void *data,
  715. unsigned size)
  716. {
  717. int rc;
  718. ulong linear;
  719. rc = linearize(ctxt, addr, size, false, &linear);
  720. if (rc != X86EMUL_CONTINUE)
  721. return rc;
  722. return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
  723. }
  724. /*
  725. * Prefetch the remaining bytes of the instruction without crossing page
  726. * boundary if they are not in fetch_cache yet.
  727. */
  728. static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
  729. {
  730. int rc;
  731. unsigned size, max_size;
  732. unsigned long linear;
  733. int cur_size = ctxt->fetch.end - ctxt->fetch.data;
  734. struct segmented_address addr = { .seg = VCPU_SREG_CS,
  735. .ea = ctxt->eip + cur_size };
  736. /*
  737. * We do not know exactly how many bytes will be needed, and
  738. * __linearize is expensive, so fetch as much as possible. We
  739. * just have to avoid going beyond the 15 byte limit, the end
  740. * of the segment, or the end of the page.
  741. *
  742. * __linearize is called with size 0 so that it does not do any
  743. * boundary check itself. Instead, we use max_size to check
  744. * against op_size.
  745. */
  746. rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
  747. &linear);
  748. if (unlikely(rc != X86EMUL_CONTINUE))
  749. return rc;
  750. size = min_t(unsigned, 15UL ^ cur_size, max_size);
  751. size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
  752. /*
753. * One instruction can only straddle two pages, and the first of them
754. * has already been loaded at the beginning of x86_decode_insn. So, if
755. * we still do not have enough bytes, we must have hit the 15-byte
756. * instruction-length limit.
  757. */
  758. if (unlikely(size < op_size))
  759. return emulate_gp(ctxt, 0);
  760. rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
  761. size, &ctxt->exception);
  762. if (unlikely(rc != X86EMUL_CONTINUE))
  763. return rc;
  764. ctxt->fetch.end += size;
  765. return X86EMUL_CONTINUE;
  766. }
  767. static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
  768. unsigned size)
  769. {
  770. unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
  771. if (unlikely(done_size < size))
  772. return __do_insn_fetch_bytes(ctxt, size - done_size);
  773. else
  774. return X86EMUL_CONTINUE;
  775. }
  776. /* Fetch next part of the instruction being emulated. */
  777. #define insn_fetch(_type, _ctxt) \
  778. ({ _type _x; \
  779. \
  780. rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
  781. if (rc != X86EMUL_CONTINUE) \
  782. goto done; \
  783. ctxt->_eip += sizeof(_type); \
  784. _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
  785. ctxt->fetch.ptr += sizeof(_type); \
  786. _x; \
  787. })
  788. #define insn_fetch_arr(_arr, _size, _ctxt) \
  789. ({ \
  790. rc = do_insn_fetch_bytes(_ctxt, _size); \
  791. if (rc != X86EMUL_CONTINUE) \
  792. goto done; \
  793. ctxt->_eip += (_size); \
  794. memcpy(_arr, ctxt->fetch.ptr, _size); \
  795. ctxt->fetch.ptr += (_size); \
  796. })
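/*
 * Example use (illustrative): inside the decoder the next opcode byte or an
 * immediate is pulled from the fetch cache with
 *
 *	ctxt->b = insn_fetch(u8, ctxt);
 *	ctxt->src.val = insn_fetch(s32, ctxt);
 *
 * where the enclosing function supplies the local "rc" and the "done" label
 * that the macros jump to on a failed fetch.
 */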
  797. /*
  798. * Given the 'reg' portion of a ModRM byte, and a register block, return a
  799. * pointer into the block that addresses the relevant register.
800. * For byte operands with no REX prefix, modrm_reg values 4-7 decode to AH, CH, DH, BH.
  801. */
  802. static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
  803. int byteop)
  804. {
  805. void *p;
  806. int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
  807. if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
  808. p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
  809. else
  810. p = reg_rmw(ctxt, modrm_reg);
  811. return p;
  812. }
  813. static int read_descriptor(struct x86_emulate_ctxt *ctxt,
  814. struct segmented_address addr,
  815. u16 *size, unsigned long *address, int op_bytes)
  816. {
  817. int rc;
  818. if (op_bytes == 2)
  819. op_bytes = 3;
  820. *address = 0;
  821. rc = segmented_read_std(ctxt, addr, size, 2);
  822. if (rc != X86EMUL_CONTINUE)
  823. return rc;
  824. addr.ea += 2;
  825. rc = segmented_read_std(ctxt, addr, address, op_bytes);
  826. return rc;
  827. }
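/*
 * Illustrative note: the op_bytes == 2 -> 3 adjustment above reflects that a
 * 16-bit LGDT/LIDT still loads a 24-bit base address, so three bytes of the
 * base are read even when the operand size is 16 bits.
 */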
  828. FASTOP2(add);
  829. FASTOP2(or);
  830. FASTOP2(adc);
  831. FASTOP2(sbb);
  832. FASTOP2(and);
  833. FASTOP2(sub);
  834. FASTOP2(xor);
  835. FASTOP2(cmp);
  836. FASTOP2(test);
  837. FASTOP1SRC2(mul, mul_ex);
  838. FASTOP1SRC2(imul, imul_ex);
  839. FASTOP1SRC2EX(div, div_ex);
  840. FASTOP1SRC2EX(idiv, idiv_ex);
  841. FASTOP3WCL(shld);
  842. FASTOP3WCL(shrd);
  843. FASTOP2W(imul);
  844. FASTOP1(not);
  845. FASTOP1(neg);
  846. FASTOP1(inc);
  847. FASTOP1(dec);
  848. FASTOP2CL(rol);
  849. FASTOP2CL(ror);
  850. FASTOP2CL(rcl);
  851. FASTOP2CL(rcr);
  852. FASTOP2CL(shl);
  853. FASTOP2CL(shr);
  854. FASTOP2CL(sar);
  855. FASTOP2W(bsf);
  856. FASTOP2W(bsr);
  857. FASTOP2W(bt);
  858. FASTOP2W(bts);
  859. FASTOP2W(btr);
  860. FASTOP2W(btc);
  861. FASTOP2(xadd);
  862. FASTOP2R(cmp, cmp_r);
  863. static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
  864. {
  865. /* If src is zero, do not writeback, but update flags */
  866. if (ctxt->src.val == 0)
  867. ctxt->dst.type = OP_NONE;
  868. return fastop(ctxt, em_bsf);
  869. }
  870. static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
  871. {
  872. /* If src is zero, do not writeback, but update flags */
  873. if (ctxt->src.val == 0)
  874. ctxt->dst.type = OP_NONE;
  875. return fastop(ctxt, em_bsr);
  876. }
  877. static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
  878. {
  879. u8 rc;
  880. void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
  881. flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
  882. asm("push %[flags]; popf; call *%[fastop]"
  883. : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
  884. return rc;
  885. }
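/*
 * Worked example (illustrative): for SETE/JE the condition nibble is 4, so
 * the computed target above is the stub at index 4 in em_setcc (setz, each
 * stub being 4-byte aligned), and %al comes back as 1 iff ZF is set in the
 * supplied flags.
 */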
  886. static void fetch_register_operand(struct operand *op)
  887. {
  888. switch (op->bytes) {
  889. case 1:
  890. op->val = *(u8 *)op->addr.reg;
  891. break;
  892. case 2:
  893. op->val = *(u16 *)op->addr.reg;
  894. break;
  895. case 4:
  896. op->val = *(u32 *)op->addr.reg;
  897. break;
  898. case 8:
  899. op->val = *(u64 *)op->addr.reg;
  900. break;
  901. }
  902. }
  903. static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
  904. {
  905. ctxt->ops->get_fpu(ctxt);
  906. switch (reg) {
  907. case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
  908. case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
  909. case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
  910. case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
  911. case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
  912. case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
  913. case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
  914. case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
  915. #ifdef CONFIG_X86_64
  916. case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
  917. case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
  918. case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
  919. case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
  920. case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
  921. case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
  922. case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
  923. case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
  924. #endif
  925. default: BUG();
  926. }
  927. ctxt->ops->put_fpu(ctxt);
  928. }
  929. static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
  930. int reg)
  931. {
  932. ctxt->ops->get_fpu(ctxt);
  933. switch (reg) {
  934. case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
  935. case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
  936. case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
  937. case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
  938. case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
  939. case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
  940. case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
  941. case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
  942. #ifdef CONFIG_X86_64
  943. case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
  944. case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
  945. case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
  946. case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
  947. case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
  948. case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
  949. case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
  950. case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
  951. #endif
  952. default: BUG();
  953. }
  954. ctxt->ops->put_fpu(ctxt);
  955. }
  956. static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
  957. {
  958. ctxt->ops->get_fpu(ctxt);
  959. switch (reg) {
  960. case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
  961. case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
  962. case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
  963. case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
  964. case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
  965. case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
  966. case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
  967. case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
  968. default: BUG();
  969. }
  970. ctxt->ops->put_fpu(ctxt);
  971. }
  972. static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
  973. {
  974. ctxt->ops->get_fpu(ctxt);
  975. switch (reg) {
  976. case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
  977. case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
  978. case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
  979. case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
  980. case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
  981. case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
  982. case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
  983. case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
  984. default: BUG();
  985. }
  986. ctxt->ops->put_fpu(ctxt);
  987. }
  988. static int em_fninit(struct x86_emulate_ctxt *ctxt)
  989. {
  990. if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
  991. return emulate_nm(ctxt);
  992. ctxt->ops->get_fpu(ctxt);
  993. asm volatile("fninit");
  994. ctxt->ops->put_fpu(ctxt);
  995. return X86EMUL_CONTINUE;
  996. }
  997. static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
  998. {
  999. u16 fcw;
  1000. if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
  1001. return emulate_nm(ctxt);
  1002. ctxt->ops->get_fpu(ctxt);
  1003. asm volatile("fnstcw %0": "+m"(fcw));
  1004. ctxt->ops->put_fpu(ctxt);
  1005. ctxt->dst.val = fcw;
  1006. return X86EMUL_CONTINUE;
  1007. }
  1008. static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
  1009. {
  1010. u16 fsw;
  1011. if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
  1012. return emulate_nm(ctxt);
  1013. ctxt->ops->get_fpu(ctxt);
  1014. asm volatile("fnstsw %0": "+m"(fsw));
  1015. ctxt->ops->put_fpu(ctxt);
  1016. ctxt->dst.val = fsw;
  1017. return X86EMUL_CONTINUE;
  1018. }
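/*
 * Decode a register operand.  The register number comes from ModRM.reg,
 * or from the low three opcode bits plus REX.B for opcodes without a
 * ModRM byte.  SSE and MMX instructions yield an OP_XMM/OP_MM operand
 * instead of a general-purpose register.
 */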
  1019. static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
  1020. struct operand *op)
  1021. {
  1022. unsigned reg = ctxt->modrm_reg;
  1023. if (!(ctxt->d & ModRM))
  1024. reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
  1025. if (ctxt->d & Sse) {
  1026. op->type = OP_XMM;
  1027. op->bytes = 16;
  1028. op->addr.xmm = reg;
  1029. read_sse_reg(ctxt, &op->vec_val, reg);
  1030. return;
  1031. }
  1032. if (ctxt->d & Mmx) {
  1033. reg &= 7;
  1034. op->type = OP_MM;
  1035. op->bytes = 8;
  1036. op->addr.mm = reg;
  1037. return;
  1038. }
  1039. op->type = OP_REG;
  1040. op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
  1041. op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
  1042. fetch_register_operand(op);
  1043. op->orig_val = op->val;
  1044. }
  1045. static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
  1046. {
  1047. if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
  1048. ctxt->modrm_seg = VCPU_SREG_SS;
  1049. }
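/*
 * Decode the ModRM byte, plus any SIB byte and displacement, into either
 * a register operand (mod == 3) or a memory operand.  16-bit and
 * 32/64-bit addressing forms are handled separately; the effective
 * address ends up in op->addr.mem.ea and the default segment in
 * ctxt->modrm_seg.
 */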
  1050. static int decode_modrm(struct x86_emulate_ctxt *ctxt,
  1051. struct operand *op)
  1052. {
  1053. u8 sib;
  1054. int index_reg, base_reg, scale;
  1055. int rc = X86EMUL_CONTINUE;
  1056. ulong modrm_ea = 0;
  1057. ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
  1058. index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
  1059. base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
  1060. ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
  1061. ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
  1062. ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
  1063. ctxt->modrm_seg = VCPU_SREG_DS;
  1064. if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
  1065. op->type = OP_REG;
  1066. op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
  1067. op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
  1068. ctxt->d & ByteOp);
  1069. if (ctxt->d & Sse) {
  1070. op->type = OP_XMM;
  1071. op->bytes = 16;
  1072. op->addr.xmm = ctxt->modrm_rm;
  1073. read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
  1074. return rc;
  1075. }
  1076. if (ctxt->d & Mmx) {
  1077. op->type = OP_MM;
  1078. op->bytes = 8;
  1079. op->addr.mm = ctxt->modrm_rm & 7;
  1080. return rc;
  1081. }
  1082. fetch_register_operand(op);
  1083. return rc;
  1084. }
  1085. op->type = OP_MEM;
  1086. if (ctxt->ad_bytes == 2) {
  1087. unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
  1088. unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
  1089. unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
  1090. unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
  1091. /* 16-bit ModR/M decode. */
  1092. switch (ctxt->modrm_mod) {
  1093. case 0:
  1094. if (ctxt->modrm_rm == 6)
  1095. modrm_ea += insn_fetch(u16, ctxt);
  1096. break;
  1097. case 1:
  1098. modrm_ea += insn_fetch(s8, ctxt);
  1099. break;
  1100. case 2:
  1101. modrm_ea += insn_fetch(u16, ctxt);
  1102. break;
  1103. }
  1104. switch (ctxt->modrm_rm) {
  1105. case 0:
  1106. modrm_ea += bx + si;
  1107. break;
  1108. case 1:
  1109. modrm_ea += bx + di;
  1110. break;
  1111. case 2:
  1112. modrm_ea += bp + si;
  1113. break;
  1114. case 3:
  1115. modrm_ea += bp + di;
  1116. break;
  1117. case 4:
  1118. modrm_ea += si;
  1119. break;
  1120. case 5:
  1121. modrm_ea += di;
  1122. break;
  1123. case 6:
  1124. if (ctxt->modrm_mod != 0)
  1125. modrm_ea += bp;
  1126. break;
  1127. case 7:
  1128. modrm_ea += bx;
  1129. break;
  1130. }
  1131. if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
  1132. (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
  1133. ctxt->modrm_seg = VCPU_SREG_SS;
  1134. modrm_ea = (u16)modrm_ea;
  1135. } else {
  1136. /* 32/64-bit ModR/M decode. */
  1137. if ((ctxt->modrm_rm & 7) == 4) {
  1138. sib = insn_fetch(u8, ctxt);
  1139. index_reg |= (sib >> 3) & 7;
  1140. base_reg |= sib & 7;
  1141. scale = sib >> 6;
  1142. if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
  1143. modrm_ea += insn_fetch(s32, ctxt);
  1144. else {
  1145. modrm_ea += reg_read(ctxt, base_reg);
  1146. adjust_modrm_seg(ctxt, base_reg);
  1147. /* Increment ESP on POP [ESP] */
  1148. if ((ctxt->d & IncSP) &&
  1149. base_reg == VCPU_REGS_RSP)
  1150. modrm_ea += ctxt->op_bytes;
  1151. }
  1152. if (index_reg != 4)
  1153. modrm_ea += reg_read(ctxt, index_reg) << scale;
  1154. } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
  1155. modrm_ea += insn_fetch(s32, ctxt);
  1156. if (ctxt->mode == X86EMUL_MODE_PROT64)
  1157. ctxt->rip_relative = 1;
  1158. } else {
  1159. base_reg = ctxt->modrm_rm;
  1160. modrm_ea += reg_read(ctxt, base_reg);
  1161. adjust_modrm_seg(ctxt, base_reg);
  1162. }
  1163. switch (ctxt->modrm_mod) {
  1164. case 1:
  1165. modrm_ea += insn_fetch(s8, ctxt);
  1166. break;
  1167. case 2:
  1168. modrm_ea += insn_fetch(s32, ctxt);
  1169. break;
  1170. }
  1171. }
  1172. op->addr.mem.ea = modrm_ea;
  1173. if (ctxt->ad_bytes != 8)
  1174. ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
  1175. done:
  1176. return rc;
  1177. }
  1178. static int decode_abs(struct x86_emulate_ctxt *ctxt,
  1179. struct operand *op)
  1180. {
  1181. int rc = X86EMUL_CONTINUE;
  1182. op->type = OP_MEM;
  1183. switch (ctxt->ad_bytes) {
  1184. case 2:
  1185. op->addr.mem.ea = insn_fetch(u16, ctxt);
  1186. break;
  1187. case 4:
  1188. op->addr.mem.ea = insn_fetch(u32, ctxt);
  1189. break;
  1190. case 8:
  1191. op->addr.mem.ea = insn_fetch(u64, ctxt);
  1192. break;
  1193. }
  1194. done:
  1195. return rc;
  1196. }
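/*
 * For bit instructions (BT/BTS/BTR/BTC) with a register bit offset and a
 * memory destination, fold the sign-extended offset into the effective
 * address (offset / 8 bytes) and keep only the sub-word bit position in
 * the source operand.
 */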
  1197. static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
  1198. {
  1199. long sv = 0, mask;
  1200. if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
  1201. mask = ~((long)ctxt->dst.bytes * 8 - 1);
  1202. if (ctxt->src.bytes == 2)
  1203. sv = (s16)ctxt->src.val & (s16)mask;
  1204. else if (ctxt->src.bytes == 4)
  1205. sv = (s32)ctxt->src.val & (s32)mask;
  1206. else
  1207. sv = (s64)ctxt->src.val & (s64)mask;
  1208. ctxt->dst.addr.mem.ea = address_mask(ctxt,
  1209. ctxt->dst.addr.mem.ea + (sv >> 3));
  1210. }
  1211. /* only subword offset */
  1212. ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
  1213. }
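/*
 * Read guest memory through the per-instruction read cache, so that an
 * instruction which is restarted replays the data it already fetched
 * instead of re-reading memory.
 */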
  1214. static int read_emulated(struct x86_emulate_ctxt *ctxt,
  1215. unsigned long addr, void *dest, unsigned size)
  1216. {
  1217. int rc;
  1218. struct read_cache *mc = &ctxt->mem_read;
  1219. if (mc->pos < mc->end)
  1220. goto read_cached;
  1221. WARN_ON((mc->end + size) >= sizeof(mc->data));
  1222. rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
  1223. &ctxt->exception);
  1224. if (rc != X86EMUL_CONTINUE)
  1225. return rc;
  1226. mc->end += size;
  1227. read_cached:
  1228. memcpy(dest, mc->data + mc->pos, size);
  1229. mc->pos += size;
  1230. return X86EMUL_CONTINUE;
  1231. }
  1232. static int segmented_read(struct x86_emulate_ctxt *ctxt,
  1233. struct segmented_address addr,
  1234. void *data,
  1235. unsigned size)
  1236. {
  1237. int rc;
  1238. ulong linear;
  1239. rc = linearize(ctxt, addr, size, false, &linear);
  1240. if (rc != X86EMUL_CONTINUE)
  1241. return rc;
  1242. return read_emulated(ctxt, linear, data, size);
  1243. }
  1244. static int segmented_write(struct x86_emulate_ctxt *ctxt,
  1245. struct segmented_address addr,
  1246. const void *data,
  1247. unsigned size)
  1248. {
  1249. int rc;
  1250. ulong linear;
  1251. rc = linearize(ctxt, addr, size, true, &linear);
  1252. if (rc != X86EMUL_CONTINUE)
  1253. return rc;
  1254. return ctxt->ops->write_emulated(ctxt, linear, data, size,
  1255. &ctxt->exception);
  1256. }
  1257. static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
  1258. struct segmented_address addr,
  1259. const void *orig_data, const void *data,
  1260. unsigned size)
  1261. {
  1262. int rc;
  1263. ulong linear;
  1264. rc = linearize(ctxt, addr, size, true, &linear);
  1265. if (rc != X86EMUL_CONTINUE)
  1266. return rc;
  1267. return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
  1268. size, &ctxt->exception);
  1269. }
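/*
 * Emulate IN from a port.  For REP string input the data is read ahead
 * in batches (bounded by the cache size, the page the destination lives
 * in, and the remaining count) so the string loop does not perform one
 * emulated port access per element.
 */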
  1270. static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
  1271. unsigned int size, unsigned short port,
  1272. void *dest)
  1273. {
  1274. struct read_cache *rc = &ctxt->io_read;
  1275. if (rc->pos == rc->end) { /* refill pio read ahead */
  1276. unsigned int in_page, n;
  1277. unsigned int count = ctxt->rep_prefix ?
  1278. address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
  1279. in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
  1280. offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
  1281. PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
  1282. n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
  1283. if (n == 0)
  1284. n = 1;
  1285. rc->pos = rc->end = 0;
  1286. if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
  1287. return 0;
  1288. rc->end = n * size;
  1289. }
  1290. if (ctxt->rep_prefix && (ctxt->d & String) &&
  1291. !(ctxt->eflags & X86_EFLAGS_DF)) {
  1292. ctxt->dst.data = rc->data + rc->pos;
  1293. ctxt->dst.type = OP_MEM_STR;
  1294. ctxt->dst.count = (rc->end - rc->pos) / size;
  1295. rc->pos = rc->end;
  1296. } else {
  1297. memcpy(dest, rc->data + rc->pos, size);
  1298. rc->pos += size;
  1299. }
  1300. return 1;
  1301. }
  1302. static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
  1303. u16 index, struct desc_struct *desc)
  1304. {
  1305. struct desc_ptr dt;
  1306. ulong addr;
  1307. ctxt->ops->get_idt(ctxt, &dt);
  1308. if (dt.size < index * 8 + 7)
  1309. return emulate_gp(ctxt, index << 3 | 0x2);
  1310. addr = dt.address + index * 8;
  1311. return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
  1312. &ctxt->exception);
  1313. }
  1314. static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
  1315. u16 selector, struct desc_ptr *dt)
  1316. {
  1317. const struct x86_emulate_ops *ops = ctxt->ops;
  1318. u32 base3 = 0;
  1319. if (selector & 1 << 2) {
  1320. struct desc_struct desc;
  1321. u16 sel;
  1322. memset (dt, 0, sizeof *dt);
  1323. if (!ops->get_segment(ctxt, &sel, &desc, &base3,
  1324. VCPU_SREG_LDTR))
  1325. return;
  1326. dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
  1327. dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
  1328. } else
  1329. ops->get_gdt(ctxt, dt);
  1330. }
  1331. static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
  1332. u16 selector, ulong *desc_addr_p)
  1333. {
  1334. struct desc_ptr dt;
  1335. u16 index = selector >> 3;
  1336. ulong addr;
  1337. get_descriptor_table_ptr(ctxt, selector, &dt);
  1338. if (dt.size < index * 8 + 7)
  1339. return emulate_gp(ctxt, selector & 0xfffc);
  1340. addr = dt.address + index * 8;
  1341. #ifdef CONFIG_X86_64
  1342. if (addr >> 32 != 0) {
  1343. u64 efer = 0;
  1344. ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
  1345. if (!(efer & EFER_LMA))
  1346. addr &= (u32)-1;
  1347. }
  1348. #endif
  1349. *desc_addr_p = addr;
  1350. return X86EMUL_CONTINUE;
  1351. }
1352. /* allowed only for 8-byte segment descriptors */
  1353. static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
  1354. u16 selector, struct desc_struct *desc,
  1355. ulong *desc_addr_p)
  1356. {
  1357. int rc;
  1358. rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
  1359. if (rc != X86EMUL_CONTINUE)
  1360. return rc;
  1361. return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
  1362. &ctxt->exception);
  1363. }
1364. /* allowed only for 8-byte segment descriptors */
  1365. static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
  1366. u16 selector, struct desc_struct *desc)
  1367. {
  1368. int rc;
  1369. ulong addr;
  1370. rc = get_descriptor_ptr(ctxt, selector, &addr);
  1371. if (rc != X86EMUL_CONTINUE)
  1372. return rc;
  1373. return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
  1374. &ctxt->exception);
  1375. }
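/*
 * Load a segment register from a selector: fetch the descriptor, apply
 * the privilege and type checks appropriate to the target segment (SS,
 * CS, TR, LDTR or a data segment), mark it accessed, and install it.
 * Failures raise #GP, #SS, #NP or #TS as appropriate.
 */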
  1376. /* Does not support long mode */
  1377. static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
  1378. u16 selector, int seg, u8 cpl,
  1379. enum x86_transfer_type transfer,
  1380. struct desc_struct *desc)
  1381. {
  1382. struct desc_struct seg_desc, old_desc;
  1383. u8 dpl, rpl;
  1384. unsigned err_vec = GP_VECTOR;
  1385. u32 err_code = 0;
  1386. bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
  1387. ulong desc_addr;
  1388. int ret;
  1389. u16 dummy;
  1390. u32 base3 = 0;
  1391. memset(&seg_desc, 0, sizeof seg_desc);
  1392. if (ctxt->mode == X86EMUL_MODE_REAL) {
  1393. /* set real mode segment descriptor (keep limit etc. for
  1394. * unreal mode) */
  1395. ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
  1396. set_desc_base(&seg_desc, selector << 4);
  1397. goto load;
  1398. } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
  1399. /* VM86 needs a clean new segment descriptor */
  1400. set_desc_base(&seg_desc, selector << 4);
  1401. set_desc_limit(&seg_desc, 0xffff);
  1402. seg_desc.type = 3;
  1403. seg_desc.p = 1;
  1404. seg_desc.s = 1;
  1405. seg_desc.dpl = 3;
  1406. goto load;
  1407. }
  1408. rpl = selector & 3;
  1409. /* NULL selector is not valid for TR, CS and SS (except for long mode) */
  1410. if ((seg == VCPU_SREG_CS
  1411. || (seg == VCPU_SREG_SS
  1412. && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
  1413. || seg == VCPU_SREG_TR)
  1414. && null_selector)
  1415. goto exception;
  1416. /* TR should be in GDT only */
  1417. if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
  1418. goto exception;
  1419. if (null_selector) /* for NULL selector skip all following checks */
  1420. goto load;
  1421. ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
  1422. if (ret != X86EMUL_CONTINUE)
  1423. return ret;
  1424. err_code = selector & 0xfffc;
  1425. err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
  1426. GP_VECTOR;
  1427. /* can't load system descriptor into segment selector */
  1428. if (seg <= VCPU_SREG_GS && !seg_desc.s) {
  1429. if (transfer == X86_TRANSFER_CALL_JMP)
  1430. return X86EMUL_UNHANDLEABLE;
  1431. goto exception;
  1432. }
  1433. if (!seg_desc.p) {
  1434. err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
  1435. goto exception;
  1436. }
  1437. dpl = seg_desc.dpl;
  1438. switch (seg) {
  1439. case VCPU_SREG_SS:
  1440. /*
1441. * segment is not a writable data segment, or the segment
1442. * selector's RPL != CPL, or the descriptor's DPL != CPL
  1443. */
  1444. if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
  1445. goto exception;
  1446. break;
  1447. case VCPU_SREG_CS:
  1448. if (!(seg_desc.type & 8))
  1449. goto exception;
  1450. if (seg_desc.type & 4) {
  1451. /* conforming */
  1452. if (dpl > cpl)
  1453. goto exception;
  1454. } else {
  1455. /* nonconforming */
  1456. if (rpl > cpl || dpl != cpl)
  1457. goto exception;
  1458. }
  1459. /* in long-mode d/b must be clear if l is set */
  1460. if (seg_desc.d && seg_desc.l) {
  1461. u64 efer = 0;
  1462. ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
  1463. if (efer & EFER_LMA)
  1464. goto exception;
  1465. }
  1466. /* CS(RPL) <- CPL */
  1467. selector = (selector & 0xfffc) | cpl;
  1468. break;
  1469. case VCPU_SREG_TR:
  1470. if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
  1471. goto exception;
  1472. old_desc = seg_desc;
  1473. seg_desc.type |= 2; /* busy */
  1474. ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
  1475. sizeof(seg_desc), &ctxt->exception);
  1476. if (ret != X86EMUL_CONTINUE)
  1477. return ret;
  1478. break;
  1479. case VCPU_SREG_LDTR:
  1480. if (seg_desc.s || seg_desc.type != 2)
  1481. goto exception;
  1482. break;
  1483. default: /* DS, ES, FS, or GS */
  1484. /*
  1485. * segment is not a data or readable code segment or
  1486. * ((segment is a data or nonconforming code segment)
  1487. * and (both RPL and CPL > DPL))
  1488. */
  1489. if ((seg_desc.type & 0xa) == 0x8 ||
  1490. (((seg_desc.type & 0xc) != 0xc) &&
  1491. (rpl > dpl && cpl > dpl)))
  1492. goto exception;
  1493. break;
  1494. }
  1495. if (seg_desc.s) {
  1496. /* mark segment as accessed */
  1497. if (!(seg_desc.type & 1)) {
  1498. seg_desc.type |= 1;
  1499. ret = write_segment_descriptor(ctxt, selector,
  1500. &seg_desc);
  1501. if (ret != X86EMUL_CONTINUE)
  1502. return ret;
  1503. }
  1504. } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
  1505. ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
  1506. sizeof(base3), &ctxt->exception);
  1507. if (ret != X86EMUL_CONTINUE)
  1508. return ret;
  1509. if (is_noncanonical_address(get_desc_base(&seg_desc) |
  1510. ((u64)base3 << 32)))
  1511. return emulate_gp(ctxt, 0);
  1512. }
  1513. load:
  1514. ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
  1515. if (desc)
  1516. *desc = seg_desc;
  1517. return X86EMUL_CONTINUE;
  1518. exception:
  1519. return emulate_exception(ctxt, err_vec, err_code, true);
  1520. }
  1521. static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
  1522. u16 selector, int seg)
  1523. {
  1524. u8 cpl = ctxt->ops->cpl(ctxt);
  1525. return __load_segment_descriptor(ctxt, selector, seg, cpl,
  1526. X86_TRANSFER_NONE, NULL);
  1527. }
  1528. static void write_register_operand(struct operand *op)
  1529. {
  1530. return assign_register(op->addr.reg, op->val, op->bytes);
  1531. }
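/*
 * Write an operand back to its destination: a general-purpose register,
 * an XMM/MMX register, or guest memory (using an emulated cmpxchg when
 * the instruction carried a LOCK prefix).
 */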
  1532. static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
  1533. {
  1534. switch (op->type) {
  1535. case OP_REG:
  1536. write_register_operand(op);
  1537. break;
  1538. case OP_MEM:
  1539. if (ctxt->lock_prefix)
  1540. return segmented_cmpxchg(ctxt,
  1541. op->addr.mem,
  1542. &op->orig_val,
  1543. &op->val,
  1544. op->bytes);
  1545. else
  1546. return segmented_write(ctxt,
  1547. op->addr.mem,
  1548. &op->val,
  1549. op->bytes);
  1550. break;
  1551. case OP_MEM_STR:
  1552. return segmented_write(ctxt,
  1553. op->addr.mem,
  1554. op->data,
  1555. op->bytes * op->count);
  1556. break;
  1557. case OP_XMM:
  1558. write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
  1559. break;
  1560. case OP_MM:
  1561. write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
  1562. break;
  1563. case OP_NONE:
  1564. /* no writeback */
  1565. break;
  1566. default:
  1567. break;
  1568. }
  1569. return X86EMUL_CONTINUE;
  1570. }
  1571. static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
  1572. {
  1573. struct segmented_address addr;
  1574. rsp_increment(ctxt, -bytes);
  1575. addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
  1576. addr.seg = VCPU_SREG_SS;
  1577. return segmented_write(ctxt, addr, data, bytes);
  1578. }
  1579. static int em_push(struct x86_emulate_ctxt *ctxt)
  1580. {
  1581. /* Disable writeback. */
  1582. ctxt->dst.type = OP_NONE;
  1583. return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
  1584. }
  1585. static int emulate_pop(struct x86_emulate_ctxt *ctxt,
  1586. void *dest, int len)
  1587. {
  1588. int rc;
  1589. struct segmented_address addr;
  1590. addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
  1591. addr.seg = VCPU_SREG_SS;
  1592. rc = segmented_read(ctxt, addr, dest, len);
  1593. if (rc != X86EMUL_CONTINUE)
  1594. return rc;
  1595. rsp_increment(ctxt, len);
  1596. return rc;
  1597. }
  1598. static int em_pop(struct x86_emulate_ctxt *ctxt)
  1599. {
  1600. return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
  1601. }
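/*
 * POPF: only a mode- and privilege-dependent subset of EFLAGS may
 * actually change.  IOPL is writable only at CPL 0, IF only when
 * CPL <= IOPL, and in VM86 mode the instruction faults unless IOPL is 3.
 */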
  1602. static int emulate_popf(struct x86_emulate_ctxt *ctxt,
  1603. void *dest, int len)
  1604. {
  1605. int rc;
  1606. unsigned long val, change_mask;
  1607. int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
  1608. int cpl = ctxt->ops->cpl(ctxt);
  1609. rc = emulate_pop(ctxt, &val, len);
  1610. if (rc != X86EMUL_CONTINUE)
  1611. return rc;
  1612. change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
  1613. X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
  1614. X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
  1615. X86_EFLAGS_AC | X86_EFLAGS_ID;
  1616. switch(ctxt->mode) {
  1617. case X86EMUL_MODE_PROT64:
  1618. case X86EMUL_MODE_PROT32:
  1619. case X86EMUL_MODE_PROT16:
  1620. if (cpl == 0)
  1621. change_mask |= X86_EFLAGS_IOPL;
  1622. if (cpl <= iopl)
  1623. change_mask |= X86_EFLAGS_IF;
  1624. break;
  1625. case X86EMUL_MODE_VM86:
  1626. if (iopl < 3)
  1627. return emulate_gp(ctxt, 0);
  1628. change_mask |= X86_EFLAGS_IF;
  1629. break;
  1630. default: /* real mode */
  1631. change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
  1632. break;
  1633. }
  1634. *(unsigned long *)dest =
  1635. (ctxt->eflags & ~change_mask) | (val & change_mask);
  1636. return rc;
  1637. }
  1638. static int em_popf(struct x86_emulate_ctxt *ctxt)
  1639. {
  1640. ctxt->dst.type = OP_REG;
  1641. ctxt->dst.addr.reg = &ctxt->eflags;
  1642. ctxt->dst.bytes = ctxt->op_bytes;
  1643. return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
  1644. }
  1645. static int em_enter(struct x86_emulate_ctxt *ctxt)
  1646. {
  1647. int rc;
  1648. unsigned frame_size = ctxt->src.val;
  1649. unsigned nesting_level = ctxt->src2.val & 31;
  1650. ulong rbp;
  1651. if (nesting_level)
  1652. return X86EMUL_UNHANDLEABLE;
  1653. rbp = reg_read(ctxt, VCPU_REGS_RBP);
  1654. rc = push(ctxt, &rbp, stack_size(ctxt));
  1655. if (rc != X86EMUL_CONTINUE)
  1656. return rc;
  1657. assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
  1658. stack_mask(ctxt));
  1659. assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
  1660. reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
  1661. stack_mask(ctxt));
  1662. return X86EMUL_CONTINUE;
  1663. }
  1664. static int em_leave(struct x86_emulate_ctxt *ctxt)
  1665. {
  1666. assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
  1667. stack_mask(ctxt));
  1668. return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
  1669. }
  1670. static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
  1671. {
  1672. int seg = ctxt->src2.val;
  1673. ctxt->src.val = get_segment_selector(ctxt, seg);
  1674. if (ctxt->op_bytes == 4) {
  1675. rsp_increment(ctxt, -2);
  1676. ctxt->op_bytes = 2;
  1677. }
  1678. return em_push(ctxt);
  1679. }
  1680. static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
  1681. {
  1682. int seg = ctxt->src2.val;
  1683. unsigned long selector;
  1684. int rc;
  1685. rc = emulate_pop(ctxt, &selector, 2);
  1686. if (rc != X86EMUL_CONTINUE)
  1687. return rc;
  1688. if (ctxt->modrm_reg == VCPU_SREG_SS)
  1689. ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
  1690. if (ctxt->op_bytes > 2)
  1691. rsp_increment(ctxt, ctxt->op_bytes - 2);
  1692. rc = load_segment_descriptor(ctxt, (u16)selector, seg);
  1693. return rc;
  1694. }
  1695. static int em_pusha(struct x86_emulate_ctxt *ctxt)
  1696. {
  1697. unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
  1698. int rc = X86EMUL_CONTINUE;
  1699. int reg = VCPU_REGS_RAX;
  1700. while (reg <= VCPU_REGS_RDI) {
  1701. (reg == VCPU_REGS_RSP) ?
  1702. (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
  1703. rc = em_push(ctxt);
  1704. if (rc != X86EMUL_CONTINUE)
  1705. return rc;
  1706. ++reg;
  1707. }
  1708. return rc;
  1709. }
  1710. static int em_pushf(struct x86_emulate_ctxt *ctxt)
  1711. {
  1712. ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
  1713. return em_push(ctxt);
  1714. }
  1715. static int em_popa(struct x86_emulate_ctxt *ctxt)
  1716. {
  1717. int rc = X86EMUL_CONTINUE;
  1718. int reg = VCPU_REGS_RDI;
  1719. u32 val;
  1720. while (reg >= VCPU_REGS_RAX) {
  1721. if (reg == VCPU_REGS_RSP) {
  1722. rsp_increment(ctxt, ctxt->op_bytes);
  1723. --reg;
  1724. }
  1725. rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
  1726. if (rc != X86EMUL_CONTINUE)
  1727. break;
  1728. assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
  1729. --reg;
  1730. }
  1731. return rc;
  1732. }
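/*
 * Real-mode interrupt: push FLAGS, CS and IP, clear IF/TF/AC, then fetch
 * the new CS:IP from the vector's entry in the IVT.
 */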
  1733. static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
  1734. {
  1735. const struct x86_emulate_ops *ops = ctxt->ops;
  1736. int rc;
  1737. struct desc_ptr dt;
  1738. gva_t cs_addr;
  1739. gva_t eip_addr;
  1740. u16 cs, eip;
  1741. /* TODO: Add limit checks */
  1742. ctxt->src.val = ctxt->eflags;
  1743. rc = em_push(ctxt);
  1744. if (rc != X86EMUL_CONTINUE)
  1745. return rc;
  1746. ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
  1747. ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
  1748. rc = em_push(ctxt);
  1749. if (rc != X86EMUL_CONTINUE)
  1750. return rc;
  1751. ctxt->src.val = ctxt->_eip;
  1752. rc = em_push(ctxt);
  1753. if (rc != X86EMUL_CONTINUE)
  1754. return rc;
  1755. ops->get_idt(ctxt, &dt);
  1756. eip_addr = dt.address + (irq << 2);
  1757. cs_addr = dt.address + (irq << 2) + 2;
  1758. rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
  1759. if (rc != X86EMUL_CONTINUE)
  1760. return rc;
  1761. rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
  1762. if (rc != X86EMUL_CONTINUE)
  1763. return rc;
  1764. rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
  1765. if (rc != X86EMUL_CONTINUE)
  1766. return rc;
  1767. ctxt->_eip = eip;
  1768. return rc;
  1769. }
  1770. int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
  1771. {
  1772. int rc;
  1773. invalidate_registers(ctxt);
  1774. rc = __emulate_int_real(ctxt, irq);
  1775. if (rc == X86EMUL_CONTINUE)
  1776. writeback_registers(ctxt);
  1777. return rc;
  1778. }
  1779. static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
  1780. {
  1781. switch(ctxt->mode) {
  1782. case X86EMUL_MODE_REAL:
  1783. return __emulate_int_real(ctxt, irq);
  1784. case X86EMUL_MODE_VM86:
  1785. case X86EMUL_MODE_PROT16:
  1786. case X86EMUL_MODE_PROT32:
  1787. case X86EMUL_MODE_PROT64:
  1788. default:
1789. /* Protected-mode interrupts are not implemented yet */
  1790. return X86EMUL_UNHANDLEABLE;
  1791. }
  1792. }
  1793. static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
  1794. {
  1795. int rc = X86EMUL_CONTINUE;
  1796. unsigned long temp_eip = 0;
  1797. unsigned long temp_eflags = 0;
  1798. unsigned long cs = 0;
  1799. unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
  1800. X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
  1801. X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
  1802. X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
  1803. X86_EFLAGS_AC | X86_EFLAGS_ID |
  1804. X86_EFLAGS_FIXED;
  1805. unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
  1806. X86_EFLAGS_VIP;
  1807. /* TODO: Add stack limit check */
  1808. rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
  1809. if (rc != X86EMUL_CONTINUE)
  1810. return rc;
  1811. if (temp_eip & ~0xffff)
  1812. return emulate_gp(ctxt, 0);
  1813. rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
  1814. if (rc != X86EMUL_CONTINUE)
  1815. return rc;
  1816. rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
  1817. if (rc != X86EMUL_CONTINUE)
  1818. return rc;
  1819. rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
  1820. if (rc != X86EMUL_CONTINUE)
  1821. return rc;
  1822. ctxt->_eip = temp_eip;
  1823. if (ctxt->op_bytes == 4)
  1824. ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
  1825. else if (ctxt->op_bytes == 2) {
  1826. ctxt->eflags &= ~0xffff;
  1827. ctxt->eflags |= temp_eflags;
  1828. }
  1829. ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
  1830. ctxt->eflags |= X86_EFLAGS_FIXED;
  1831. ctxt->ops->set_nmi_mask(ctxt, false);
  1832. return rc;
  1833. }
  1834. static int em_iret(struct x86_emulate_ctxt *ctxt)
  1835. {
  1836. switch(ctxt->mode) {
  1837. case X86EMUL_MODE_REAL:
  1838. return emulate_iret_real(ctxt);
  1839. case X86EMUL_MODE_VM86:
  1840. case X86EMUL_MODE_PROT16:
  1841. case X86EMUL_MODE_PROT32:
  1842. case X86EMUL_MODE_PROT64:
  1843. default:
1844. /* iret from protected mode is not implemented yet */
  1845. return X86EMUL_UNHANDLEABLE;
  1846. }
  1847. }
  1848. static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
  1849. {
  1850. int rc;
  1851. unsigned short sel, old_sel;
  1852. struct desc_struct old_desc, new_desc;
  1853. const struct x86_emulate_ops *ops = ctxt->ops;
  1854. u8 cpl = ctxt->ops->cpl(ctxt);
  1855. /* Assignment of RIP may only fail in 64-bit mode */
  1856. if (ctxt->mode == X86EMUL_MODE_PROT64)
  1857. ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
  1858. VCPU_SREG_CS);
  1859. memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
  1860. rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
  1861. X86_TRANSFER_CALL_JMP,
  1862. &new_desc);
  1863. if (rc != X86EMUL_CONTINUE)
  1864. return rc;
  1865. rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
  1866. if (rc != X86EMUL_CONTINUE) {
  1867. WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
  1868. /* assigning eip failed; restore the old cs */
  1869. ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
  1870. return rc;
  1871. }
  1872. return rc;
  1873. }
  1874. static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
  1875. {
  1876. return assign_eip_near(ctxt, ctxt->src.val);
  1877. }
  1878. static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
  1879. {
  1880. int rc;
  1881. long int old_eip;
  1882. old_eip = ctxt->_eip;
  1883. rc = assign_eip_near(ctxt, ctxt->src.val);
  1884. if (rc != X86EMUL_CONTINUE)
  1885. return rc;
  1886. ctxt->src.val = old_eip;
  1887. rc = em_push(ctxt);
  1888. return rc;
  1889. }
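/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination.  On a mismatch
 * the destination is loaded into EDX:EAX and ZF is cleared; on a match
 * ECX:EBX is written to the destination and ZF is set.  CMPXCHG16B is
 * not handled.
 */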
  1890. static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
  1891. {
  1892. u64 old = ctxt->dst.orig_val64;
  1893. if (ctxt->dst.bytes == 16)
  1894. return X86EMUL_UNHANDLEABLE;
  1895. if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
  1896. ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
  1897. *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
  1898. *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
  1899. ctxt->eflags &= ~X86_EFLAGS_ZF;
  1900. } else {
  1901. ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
  1902. (u32) reg_read(ctxt, VCPU_REGS_RBX);
  1903. ctxt->eflags |= X86_EFLAGS_ZF;
  1904. }
  1905. return X86EMUL_CONTINUE;
  1906. }
  1907. static int em_ret(struct x86_emulate_ctxt *ctxt)
  1908. {
  1909. int rc;
  1910. unsigned long eip;
  1911. rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
  1912. if (rc != X86EMUL_CONTINUE)
  1913. return rc;
  1914. return assign_eip_near(ctxt, eip);
  1915. }
  1916. static int em_ret_far(struct x86_emulate_ctxt *ctxt)
  1917. {
  1918. int rc;
  1919. unsigned long eip, cs;
  1920. u16 old_cs;
  1921. int cpl = ctxt->ops->cpl(ctxt);
  1922. struct desc_struct old_desc, new_desc;
  1923. const struct x86_emulate_ops *ops = ctxt->ops;
  1924. if (ctxt->mode == X86EMUL_MODE_PROT64)
  1925. ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
  1926. VCPU_SREG_CS);
  1927. rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
  1928. if (rc != X86EMUL_CONTINUE)
  1929. return rc;
  1930. rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
  1931. if (rc != X86EMUL_CONTINUE)
  1932. return rc;
  1933. /* Outer-privilege level return is not implemented */
  1934. if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
  1935. return X86EMUL_UNHANDLEABLE;
  1936. rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
  1937. X86_TRANSFER_RET,
  1938. &new_desc);
  1939. if (rc != X86EMUL_CONTINUE)
  1940. return rc;
  1941. rc = assign_eip_far(ctxt, eip, &new_desc);
  1942. if (rc != X86EMUL_CONTINUE) {
  1943. WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
  1944. ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
  1945. }
  1946. return rc;
  1947. }
  1948. static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
  1949. {
  1950. int rc;
  1951. rc = em_ret_far(ctxt);
  1952. if (rc != X86EMUL_CONTINUE)
  1953. return rc;
  1954. rsp_increment(ctxt, ctxt->src.val);
  1955. return X86EMUL_CONTINUE;
  1956. }
  1957. static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
  1958. {
  1959. /* Save real source value, then compare EAX against destination. */
  1960. ctxt->dst.orig_val = ctxt->dst.val;
  1961. ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
  1962. ctxt->src.orig_val = ctxt->src.val;
  1963. ctxt->src.val = ctxt->dst.orig_val;
  1964. fastop(ctxt, em_cmp);
  1965. if (ctxt->eflags & X86_EFLAGS_ZF) {
  1966. /* Success: write back to memory; no update of EAX */
  1967. ctxt->src.type = OP_NONE;
  1968. ctxt->dst.val = ctxt->src.orig_val;
  1969. } else {
  1970. /* Failure: write the value we saw to EAX. */
  1971. ctxt->src.type = OP_REG;
  1972. ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
  1973. ctxt->src.val = ctxt->dst.orig_val;
  1974. /* Create write-cycle to dest by writing the same value */
  1975. ctxt->dst.val = ctxt->dst.orig_val;
  1976. }
  1977. return X86EMUL_CONTINUE;
  1978. }
  1979. static int em_lseg(struct x86_emulate_ctxt *ctxt)
  1980. {
  1981. int seg = ctxt->src2.val;
  1982. unsigned short sel;
  1983. int rc;
  1984. memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
  1985. rc = load_segment_descriptor(ctxt, sel, seg);
  1986. if (rc != X86EMUL_CONTINUE)
  1987. return rc;
  1988. ctxt->dst.val = ctxt->src.val;
  1989. return rc;
  1990. }
  1991. static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
  1992. {
  1993. u32 eax, ebx, ecx, edx;
  1994. eax = 0x80000001;
  1995. ecx = 0;
  1996. ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
  1997. return edx & bit(X86_FEATURE_LM);
  1998. }
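/*
 * Read one field of the SMM state-save area at smbase + offset.  A failed
 * physical read makes the enclosing function return X86EMUL_UNHANDLEABLE.
 */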
  1999. #define GET_SMSTATE(type, smbase, offset) \
  2000. ({ \
  2001. type __val; \
  2002. int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
  2003. sizeof(__val)); \
  2004. if (r != X86EMUL_CONTINUE) \
  2005. return X86EMUL_UNHANDLEABLE; \
  2006. __val; \
  2007. })
  2008. static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
  2009. {
  2010. desc->g = (flags >> 23) & 1;
  2011. desc->d = (flags >> 22) & 1;
  2012. desc->l = (flags >> 21) & 1;
  2013. desc->avl = (flags >> 20) & 1;
  2014. desc->p = (flags >> 15) & 1;
  2015. desc->dpl = (flags >> 13) & 3;
  2016. desc->s = (flags >> 12) & 1;
  2017. desc->type = (flags >> 8) & 15;
  2018. }
  2019. static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
  2020. {
  2021. struct desc_struct desc;
  2022. int offset;
  2023. u16 selector;
  2024. selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
  2025. if (n < 3)
  2026. offset = 0x7f84 + n * 12;
  2027. else
  2028. offset = 0x7f2c + (n - 3) * 12;
  2029. set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
  2030. set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
  2031. rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
  2032. ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
  2033. return X86EMUL_CONTINUE;
  2034. }
  2035. static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
  2036. {
  2037. struct desc_struct desc;
  2038. int offset;
  2039. u16 selector;
  2040. u32 base3;
  2041. offset = 0x7e00 + n * 16;
  2042. selector = GET_SMSTATE(u16, smbase, offset);
  2043. rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
  2044. set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
  2045. set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
  2046. base3 = GET_SMSTATE(u32, smbase, offset + 12);
  2047. ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
  2048. return X86EMUL_CONTINUE;
  2049. }
  2050. static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
  2051. u64 cr0, u64 cr4)
  2052. {
  2053. int bad;
  2054. /*
  2055. * First enable PAE, long mode needs it before CR0.PG = 1 is set.
  2056. * Then enable protected mode. However, PCID cannot be enabled
  2057. * if EFER.LMA=0, so set it separately.
  2058. */
  2059. bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
  2060. if (bad)
  2061. return X86EMUL_UNHANDLEABLE;
  2062. bad = ctxt->ops->set_cr(ctxt, 0, cr0);
  2063. if (bad)
  2064. return X86EMUL_UNHANDLEABLE;
  2065. if (cr4 & X86_CR4_PCIDE) {
  2066. bad = ctxt->ops->set_cr(ctxt, 4, cr4);
  2067. if (bad)
  2068. return X86EMUL_UNHANDLEABLE;
  2069. }
  2070. return X86EMUL_CONTINUE;
  2071. }
  2072. static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
  2073. {
  2074. struct desc_struct desc;
  2075. struct desc_ptr dt;
  2076. u16 selector;
  2077. u32 val, cr0, cr4;
  2078. int i;
  2079. cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
  2080. ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
  2081. ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
  2082. ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
  2083. for (i = 0; i < 8; i++)
  2084. *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
  2085. val = GET_SMSTATE(u32, smbase, 0x7fcc);
  2086. ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
  2087. val = GET_SMSTATE(u32, smbase, 0x7fc8);
  2088. ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
  2089. selector = GET_SMSTATE(u32, smbase, 0x7fc4);
  2090. set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
  2091. set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
  2092. rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
  2093. ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
  2094. selector = GET_SMSTATE(u32, smbase, 0x7fc0);
  2095. set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
  2096. set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
  2097. rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
  2098. ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
  2099. dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
  2100. dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
  2101. ctxt->ops->set_gdt(ctxt, &dt);
  2102. dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
  2103. dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
  2104. ctxt->ops->set_idt(ctxt, &dt);
  2105. for (i = 0; i < 6; i++) {
  2106. int r = rsm_load_seg_32(ctxt, smbase, i);
  2107. if (r != X86EMUL_CONTINUE)
  2108. return r;
  2109. }
  2110. cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
  2111. ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
  2112. return rsm_enter_protected_mode(ctxt, cr0, cr4);
  2113. }
  2114. static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
  2115. {
  2116. struct desc_struct desc;
  2117. struct desc_ptr dt;
  2118. u64 val, cr0, cr4;
  2119. u32 base3;
  2120. u16 selector;
  2121. int i, r;
  2122. for (i = 0; i < 16; i++)
  2123. *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
  2124. ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
  2125. ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
  2126. val = GET_SMSTATE(u32, smbase, 0x7f68);
  2127. ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
  2128. val = GET_SMSTATE(u32, smbase, 0x7f60);
  2129. ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
  2130. cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
  2131. ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
  2132. cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
  2133. ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
  2134. val = GET_SMSTATE(u64, smbase, 0x7ed0);
  2135. ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
  2136. selector = GET_SMSTATE(u32, smbase, 0x7e90);
  2137. rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
  2138. set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
  2139. set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
  2140. base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
  2141. ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
  2142. dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
  2143. dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
  2144. ctxt->ops->set_idt(ctxt, &dt);
  2145. selector = GET_SMSTATE(u32, smbase, 0x7e70);
  2146. rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
  2147. set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
  2148. set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
  2149. base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
  2150. ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
  2151. dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
  2152. dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
  2153. ctxt->ops->set_gdt(ctxt, &dt);
  2154. r = rsm_enter_protected_mode(ctxt, cr0, cr4);
  2155. if (r != X86EMUL_CONTINUE)
  2156. return r;
  2157. for (i = 0; i < 6; i++) {
  2158. r = rsm_load_seg_64(ctxt, smbase, i);
  2159. if (r != X86EMUL_CONTINUE)
  2160. return r;
  2161. }
  2162. return X86EMUL_CONTINUE;
  2163. }
  2164. static int em_rsm(struct x86_emulate_ctxt *ctxt)
  2165. {
  2166. unsigned long cr0, cr4, efer;
  2167. u64 smbase;
  2168. int ret;
  2169. if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
  2170. return emulate_ud(ctxt);
  2171. /*
  2172. * Get back to real mode, to prepare a safe state in which to load
  2173. * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
  2174. * supports long mode.
  2175. */
  2176. cr4 = ctxt->ops->get_cr(ctxt, 4);
  2177. if (emulator_has_longmode(ctxt)) {
  2178. struct desc_struct cs_desc;
  2179. /* Zero CR4.PCIDE before CR0.PG. */
  2180. if (cr4 & X86_CR4_PCIDE) {
  2181. ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
  2182. cr4 &= ~X86_CR4_PCIDE;
  2183. }
  2184. /* A 32-bit code segment is required to clear EFER.LMA. */
  2185. memset(&cs_desc, 0, sizeof(cs_desc));
  2186. cs_desc.type = 0xb;
  2187. cs_desc.s = cs_desc.g = cs_desc.p = 1;
  2188. ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
  2189. }
  2190. /* For the 64-bit case, this will clear EFER.LMA. */
  2191. cr0 = ctxt->ops->get_cr(ctxt, 0);
  2192. if (cr0 & X86_CR0_PE)
  2193. ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
  2194. /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
  2195. if (cr4 & X86_CR4_PAE)
  2196. ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
  2197. /* And finally go back to 32-bit mode. */
  2198. efer = 0;
  2199. ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
  2200. smbase = ctxt->ops->get_smbase(ctxt);
  2201. if (emulator_has_longmode(ctxt))
  2202. ret = rsm_load_state_64(ctxt, smbase + 0x8000);
  2203. else
  2204. ret = rsm_load_state_32(ctxt, smbase + 0x8000);
  2205. if (ret != X86EMUL_CONTINUE) {
  2206. /* FIXME: should triple fault */
  2207. return X86EMUL_UNHANDLEABLE;
  2208. }
  2209. if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
  2210. ctxt->ops->set_nmi_mask(ctxt, false);
  2211. ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
  2212. ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
  2213. return X86EMUL_CONTINUE;
  2214. }
  2215. static void
  2216. setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
  2217. struct desc_struct *cs, struct desc_struct *ss)
  2218. {
  2219. cs->l = 0; /* will be adjusted later */
  2220. set_desc_base(cs, 0); /* flat segment */
2221. cs->g = 1; /* 4KB granularity */
  2222. set_desc_limit(cs, 0xfffff); /* 4GB limit */
  2223. cs->type = 0x0b; /* Read, Execute, Accessed */
  2224. cs->s = 1;
  2225. cs->dpl = 0; /* will be adjusted later */
  2226. cs->p = 1;
  2227. cs->d = 1;
  2228. cs->avl = 0;
  2229. set_desc_base(ss, 0); /* flat segment */
  2230. set_desc_limit(ss, 0xfffff); /* 4GB limit */
2231. ss->g = 1; /* 4KB granularity */
2232. ss->s = 1;
2233. ss->type = 0x03; /* Read/Write, Accessed */
2234. ss->d = 1; /* 32-bit stack segment */
  2235. ss->dpl = 0;
  2236. ss->p = 1;
  2237. ss->l = 0;
  2238. ss->avl = 0;
  2239. }
  2240. static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
  2241. {
  2242. u32 eax, ebx, ecx, edx;
  2243. eax = ecx = 0;
  2244. ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
  2245. return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
  2246. && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
  2247. && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
  2248. }
  2249. static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
  2250. {
  2251. const struct x86_emulate_ops *ops = ctxt->ops;
  2252. u32 eax, ebx, ecx, edx;
  2253. /*
2254. * syscall should always be enabled in long mode, so the check only
2255. * becomes vendor specific (via cpuid) when other modes are active.
  2256. */
  2257. if (ctxt->mode == X86EMUL_MODE_PROT64)
  2258. return true;
  2259. eax = 0x00000000;
  2260. ecx = 0x00000000;
  2261. ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
  2262. /*
  2263. * Intel ("GenuineIntel")
2264. * remark: Intel CPUs only support "syscall" in 64-bit
2265. * long mode, so a 32-bit compat application running in a
2266. * 64-bit guest will #UD.  While this behaviour could be
2267. * fixed by emulating the AMD response, AMD CPUs can't be
2268. * made to behave like Intel CPUs.
  2269. */
  2270. if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
  2271. ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
  2272. edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
  2273. return false;
  2274. /* AMD ("AuthenticAMD") */
  2275. if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
  2276. ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
  2277. edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
  2278. return true;
  2279. /* AMD ("AMDisbetter!") */
  2280. if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
  2281. ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
  2282. edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
  2283. return true;
  2284. /* default: (not Intel, not AMD), apply Intel's stricter rules... */
  2285. return false;
  2286. }
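/*
 * SYSCALL: load the CS/SS selectors from MSR_STAR, save the return RIP
 * in RCX (and RFLAGS in R11 in long mode), take the new RIP from
 * MSR_LSTAR/MSR_CSTAR and mask RFLAGS with MSR_SYSCALL_MASK.  In legacy
 * mode the target comes from the low half of MSR_STAR instead.
 */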
  2287. static int em_syscall(struct x86_emulate_ctxt *ctxt)
  2288. {
  2289. const struct x86_emulate_ops *ops = ctxt->ops;
  2290. struct desc_struct cs, ss;
  2291. u64 msr_data;
  2292. u16 cs_sel, ss_sel;
  2293. u64 efer = 0;
  2294. /* syscall is not available in real mode */
  2295. if (ctxt->mode == X86EMUL_MODE_REAL ||
  2296. ctxt->mode == X86EMUL_MODE_VM86)
  2297. return emulate_ud(ctxt);
  2298. if (!(em_syscall_is_enabled(ctxt)))
  2299. return emulate_ud(ctxt);
  2300. ops->get_msr(ctxt, MSR_EFER, &efer);
  2301. setup_syscalls_segments(ctxt, &cs, &ss);
  2302. if (!(efer & EFER_SCE))
  2303. return emulate_ud(ctxt);
  2304. ops->get_msr(ctxt, MSR_STAR, &msr_data);
  2305. msr_data >>= 32;
  2306. cs_sel = (u16)(msr_data & 0xfffc);
  2307. ss_sel = (u16)(msr_data + 8);
  2308. if (efer & EFER_LMA) {
  2309. cs.d = 0;
  2310. cs.l = 1;
  2311. }
  2312. ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
  2313. ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
  2314. *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
  2315. if (efer & EFER_LMA) {
  2316. #ifdef CONFIG_X86_64
  2317. *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
  2318. ops->get_msr(ctxt,
  2319. ctxt->mode == X86EMUL_MODE_PROT64 ?
  2320. MSR_LSTAR : MSR_CSTAR, &msr_data);
  2321. ctxt->_eip = msr_data;
  2322. ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
  2323. ctxt->eflags &= ~msr_data;
  2324. ctxt->eflags |= X86_EFLAGS_FIXED;
  2325. #endif
  2326. } else {
  2327. /* legacy mode */
  2328. ops->get_msr(ctxt, MSR_STAR, &msr_data);
  2329. ctxt->_eip = (u32)msr_data;
  2330. ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
  2331. }
  2332. return X86EMUL_CONTINUE;
  2333. }
  2334. static int em_sysenter(struct x86_emulate_ctxt *ctxt)
  2335. {
  2336. const struct x86_emulate_ops *ops = ctxt->ops;
  2337. struct desc_struct cs, ss;
  2338. u64 msr_data;
  2339. u16 cs_sel, ss_sel;
  2340. u64 efer = 0;
  2341. ops->get_msr(ctxt, MSR_EFER, &efer);
  2342. /* inject #GP if in real mode */
  2343. if (ctxt->mode == X86EMUL_MODE_REAL)
  2344. return emulate_gp(ctxt, 0);
  2345. /*
  2346. * Not recognized on AMD in compat mode (but is recognized in legacy
  2347. * mode).
  2348. */
  2349. if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
  2350. && !vendor_intel(ctxt))
  2351. return emulate_ud(ctxt);
2352. /* sysenter/sysexit have not been tested in 64-bit mode. */
  2353. if (ctxt->mode == X86EMUL_MODE_PROT64)
  2354. return X86EMUL_UNHANDLEABLE;
  2355. setup_syscalls_segments(ctxt, &cs, &ss);
  2356. ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
  2357. if ((msr_data & 0xfffc) == 0x0)
  2358. return emulate_gp(ctxt, 0);
  2359. ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
  2360. cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
  2361. ss_sel = cs_sel + 8;
  2362. if (efer & EFER_LMA) {
  2363. cs.d = 0;
  2364. cs.l = 1;
  2365. }
  2366. ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
  2367. ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
  2368. ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
  2369. ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
  2370. ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
  2371. *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
  2372. (u32)msr_data;
  2373. return X86EMUL_CONTINUE;
  2374. }
  2375. static int em_sysexit(struct x86_emulate_ctxt *ctxt)
  2376. {
  2377. const struct x86_emulate_ops *ops = ctxt->ops;
  2378. struct desc_struct cs, ss;
  2379. u64 msr_data, rcx, rdx;
  2380. int usermode;
  2381. u16 cs_sel = 0, ss_sel = 0;
  2382. /* inject #GP if in real mode or Virtual 8086 mode */
  2383. if (ctxt->mode == X86EMUL_MODE_REAL ||
  2384. ctxt->mode == X86EMUL_MODE_VM86)
  2385. return emulate_gp(ctxt, 0);
  2386. setup_syscalls_segments(ctxt, &cs, &ss);
  2387. if ((ctxt->rex_prefix & 0x8) != 0x0)
  2388. usermode = X86EMUL_MODE_PROT64;
  2389. else
  2390. usermode = X86EMUL_MODE_PROT32;
  2391. rcx = reg_read(ctxt, VCPU_REGS_RCX);
  2392. rdx = reg_read(ctxt, VCPU_REGS_RDX);
  2393. cs.dpl = 3;
  2394. ss.dpl = 3;
  2395. ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
  2396. switch (usermode) {
  2397. case X86EMUL_MODE_PROT32:
  2398. cs_sel = (u16)(msr_data + 16);
  2399. if ((msr_data & 0xfffc) == 0x0)
  2400. return emulate_gp(ctxt, 0);
  2401. ss_sel = (u16)(msr_data + 24);
  2402. rcx = (u32)rcx;
  2403. rdx = (u32)rdx;
  2404. break;
  2405. case X86EMUL_MODE_PROT64:
  2406. cs_sel = (u16)(msr_data + 32);
  2407. if (msr_data == 0x0)
  2408. return emulate_gp(ctxt, 0);
  2409. ss_sel = cs_sel + 8;
  2410. cs.d = 0;
  2411. cs.l = 1;
  2412. if (is_noncanonical_address(rcx) ||
  2413. is_noncanonical_address(rdx))
  2414. return emulate_gp(ctxt, 0);
  2415. break;
  2416. }
  2417. cs_sel |= SEGMENT_RPL_MASK;
  2418. ss_sel |= SEGMENT_RPL_MASK;
  2419. ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
  2420. ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
  2421. ctxt->_eip = rdx;
  2422. *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
  2423. return X86EMUL_CONTINUE;
  2424. }
  2425. static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
  2426. {
  2427. int iopl;
  2428. if (ctxt->mode == X86EMUL_MODE_REAL)
  2429. return false;
  2430. if (ctxt->mode == X86EMUL_MODE_VM86)
  2431. return true;
  2432. iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
  2433. return ctxt->ops->cpl(ctxt) > iopl;
  2434. }
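/*
 * Consult the I/O permission bitmap in the TSS: access to the port range
 * is allowed only if every covering bit is clear.
 */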
  2435. static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
  2436. u16 port, u16 len)
  2437. {
  2438. const struct x86_emulate_ops *ops = ctxt->ops;
  2439. struct desc_struct tr_seg;
  2440. u32 base3;
  2441. int r;
  2442. u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
  2443. unsigned mask = (1 << len) - 1;
  2444. unsigned long base;
  2445. ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
  2446. if (!tr_seg.p)
  2447. return false;
  2448. if (desc_limit_scaled(&tr_seg) < 103)
  2449. return false;
  2450. base = get_desc_base(&tr_seg);
  2451. #ifdef CONFIG_X86_64
  2452. base |= ((u64)base3) << 32;
  2453. #endif
  2454. r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
  2455. if (r != X86EMUL_CONTINUE)
  2456. return false;
  2457. if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
  2458. return false;
  2459. r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
  2460. if (r != X86EMUL_CONTINUE)
  2461. return false;
  2462. if ((perm >> bit_idx) & mask)
  2463. return false;
  2464. return true;
  2465. }
  2466. static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
  2467. u16 port, u16 len)
  2468. {
  2469. if (ctxt->perm_ok)
  2470. return true;
  2471. if (emulator_bad_iopl(ctxt))
  2472. if (!emulator_io_port_access_allowed(ctxt, port, len))
  2473. return false;
  2474. ctxt->perm_ok = true;
  2475. return true;
  2476. }
  2477. static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
  2478. {
  2479. /*
2480. * Intel CPUs mask the counter and pointers in a rather strange
2481. * manner when ECX is zero, due to REP-string optimizations.
  2482. */
  2483. #ifdef CONFIG_X86_64
  2484. if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
  2485. return;
  2486. *reg_write(ctxt, VCPU_REGS_RCX) = 0;
  2487. switch (ctxt->b) {
  2488. case 0xa4: /* movsb */
  2489. case 0xa5: /* movsd/w */
  2490. *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
  2491. /* fall through */
  2492. case 0xaa: /* stosb */
  2493. case 0xab: /* stosd/w */
  2494. *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
  2495. }
  2496. #endif
  2497. }
  2498. static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
  2499. struct tss_segment_16 *tss)
  2500. {
  2501. tss->ip = ctxt->_eip;
  2502. tss->flag = ctxt->eflags;
  2503. tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
  2504. tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
  2505. tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
  2506. tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
  2507. tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
  2508. tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
  2509. tss->si = reg_read(ctxt, VCPU_REGS_RSI);
  2510. tss->di = reg_read(ctxt, VCPU_REGS_RDI);
  2511. tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
  2512. tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
  2513. tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
  2514. tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
  2515. tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
  2516. }
  2517. static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
  2518. struct tss_segment_16 *tss)
  2519. {
  2520. int ret;
  2521. u8 cpl;
  2522. ctxt->_eip = tss->ip;
  2523. ctxt->eflags = tss->flag | 2;
  2524. *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
  2525. *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
  2526. *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
  2527. *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
  2528. *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
  2529. *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
  2530. *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
  2531. *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
  2532. /*
  2533. * SDM says that segment selectors are loaded before segment
  2534. * descriptors
  2535. */
  2536. set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
  2537. set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
  2538. set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
  2539. set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
  2540. set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
  2541. cpl = tss->cs & 3;
  2542. /*
2543. * Now load the segment descriptors. If a fault happens at this stage,
2544. * it is handled in the context of the new task.
  2545. */
  2546. ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
  2547. X86_TRANSFER_TASK_SWITCH, NULL);
  2548. if (ret != X86EMUL_CONTINUE)
  2549. return ret;
  2550. ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
  2551. X86_TRANSFER_TASK_SWITCH, NULL);
  2552. if (ret != X86EMUL_CONTINUE)
  2553. return ret;
  2554. ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
  2555. X86_TRANSFER_TASK_SWITCH, NULL);
  2556. if (ret != X86EMUL_CONTINUE)
  2557. return ret;
  2558. ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
  2559. X86_TRANSFER_TASK_SWITCH, NULL);
  2560. if (ret != X86EMUL_CONTINUE)
  2561. return ret;
  2562. ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
  2563. X86_TRANSFER_TASK_SWITCH, NULL);
  2564. if (ret != X86EMUL_CONTINUE)
  2565. return ret;
  2566. return X86EMUL_CONTINUE;
  2567. }
  2568. static int task_switch_16(struct x86_emulate_ctxt *ctxt,
  2569. u16 tss_selector, u16 old_tss_sel,
  2570. ulong old_tss_base, struct desc_struct *new_desc)
  2571. {
  2572. const struct x86_emulate_ops *ops = ctxt->ops;
  2573. struct tss_segment_16 tss_seg;
  2574. int ret;
  2575. u32 new_tss_base = get_desc_base(new_desc);
  2576. ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
  2577. &ctxt->exception);
  2578. if (ret != X86EMUL_CONTINUE)
  2579. return ret;
  2580. save_state_to_tss16(ctxt, &tss_seg);
  2581. ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
  2582. &ctxt->exception);
  2583. if (ret != X86EMUL_CONTINUE)
  2584. return ret;
  2585. ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
  2586. &ctxt->exception);
  2587. if (ret != X86EMUL_CONTINUE)
  2588. return ret;
  2589. if (old_tss_sel != 0xffff) {
  2590. tss_seg.prev_task_link = old_tss_sel;
  2591. ret = ops->write_std(ctxt, new_tss_base,
  2592. &tss_seg.prev_task_link,
  2593. sizeof tss_seg.prev_task_link,
  2594. &ctxt->exception);
  2595. if (ret != X86EMUL_CONTINUE)
  2596. return ret;
  2597. }
  2598. return load_state_from_tss16(ctxt, &tss_seg);
  2599. }
  2600. static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
  2601. struct tss_segment_32 *tss)
  2602. {
2603. /* CR3 and the LDT selector are intentionally not saved */
  2604. tss->eip = ctxt->_eip;
  2605. tss->eflags = ctxt->eflags;
  2606. tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
  2607. tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
  2608. tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
  2609. tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
  2610. tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
  2611. tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
  2612. tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
  2613. tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
  2614. tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
  2615. tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
  2616. tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
  2617. tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
  2618. tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
  2619. tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
  2620. }
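/*
 * Load the incoming task's state from a 32-bit TSS: CR3 first, then the
 * general-purpose registers and segment selectors, and finally the segment
 * descriptors (a fault from that point on is taken in the new task).
 */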
  2621. static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
  2622. struct tss_segment_32 *tss)
  2623. {
  2624. int ret;
  2625. u8 cpl;
  2626. if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
  2627. return emulate_gp(ctxt, 0);
  2628. ctxt->_eip = tss->eip;
  2629. ctxt->eflags = tss->eflags | 2;
  2630. /* General purpose registers */
  2631. *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
  2632. *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
  2633. *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
  2634. *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
  2635. *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
  2636. *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
  2637. *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
  2638. *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
  2639. /*
  2640. * SDM says that segment selectors are loaded before segment
  2641. * descriptors. This is important because CPL checks will
  2642. * use CS.RPL.
  2643. */
  2644. set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
  2645. set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
  2646. set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
  2647. set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
  2648. set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
  2649. set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
  2650. set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
  2651. /*
  2652. * If we're switching between Protected Mode and VM86, we need to make
  2653. * sure to update the mode before loading the segment descriptors so
  2654. * that the selectors are interpreted correctly.
  2655. */
  2656. if (ctxt->eflags & X86_EFLAGS_VM) {
  2657. ctxt->mode = X86EMUL_MODE_VM86;
  2658. cpl = 3;
  2659. } else {
  2660. ctxt->mode = X86EMUL_MODE_PROT32;
  2661. cpl = tss->cs & 3;
  2662. }
  2663. /*
2664. * Now load the segment descriptors. If a fault happens at this stage,
2665. * it is handled in the context of the new task.
  2666. */
  2667. ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
  2668. cpl, X86_TRANSFER_TASK_SWITCH, NULL);
  2669. if (ret != X86EMUL_CONTINUE)
  2670. return ret;
  2671. ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
  2672. X86_TRANSFER_TASK_SWITCH, NULL);
  2673. if (ret != X86EMUL_CONTINUE)
  2674. return ret;
  2675. ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
  2676. X86_TRANSFER_TASK_SWITCH, NULL);
  2677. if (ret != X86EMUL_CONTINUE)
  2678. return ret;
  2679. ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
  2680. X86_TRANSFER_TASK_SWITCH, NULL);
  2681. if (ret != X86EMUL_CONTINUE)
  2682. return ret;
  2683. ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
  2684. X86_TRANSFER_TASK_SWITCH, NULL);
  2685. if (ret != X86EMUL_CONTINUE)
  2686. return ret;
  2687. ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
  2688. X86_TRANSFER_TASK_SWITCH, NULL);
  2689. if (ret != X86EMUL_CONTINUE)
  2690. return ret;
  2691. ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
  2692. X86_TRANSFER_TASK_SWITCH, NULL);
  2693. return ret;
  2694. }
  2695. static int task_switch_32(struct x86_emulate_ctxt *ctxt,
  2696. u16 tss_selector, u16 old_tss_sel,
  2697. ulong old_tss_base, struct desc_struct *new_desc)
  2698. {
  2699. const struct x86_emulate_ops *ops = ctxt->ops;
  2700. struct tss_segment_32 tss_seg;
  2701. int ret;
  2702. u32 new_tss_base = get_desc_base(new_desc);
  2703. u32 eip_offset = offsetof(struct tss_segment_32, eip);
  2704. u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
  2705. ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
  2706. &ctxt->exception);
  2707. if (ret != X86EMUL_CONTINUE)
  2708. return ret;
  2709. save_state_to_tss32(ctxt, &tss_seg);
  2710. /* Only GP registers and segment selectors are saved */
  2711. ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
  2712. ldt_sel_offset - eip_offset, &ctxt->exception);
  2713. if (ret != X86EMUL_CONTINUE)
  2714. return ret;
  2715. ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
  2716. &ctxt->exception);
  2717. if (ret != X86EMUL_CONTINUE)
  2718. return ret;
  2719. if (old_tss_sel != 0xffff) {
  2720. tss_seg.prev_task_link = old_tss_sel;
  2721. ret = ops->write_std(ctxt, new_tss_base,
  2722. &tss_seg.prev_task_link,
  2723. sizeof tss_seg.prev_task_link,
  2724. &ctxt->exception);
  2725. if (ret != X86EMUL_CONTINUE)
  2726. return ret;
  2727. }
  2728. return load_state_from_tss32(ctxt, &tss_seg);
  2729. }
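/*
 * Core of hardware task-switch emulation: validate the target TSS
 * descriptor, save the outgoing state into the old TSS, load the new one
 * (16- or 32-bit layout depending on the descriptor type), update the
 * busy/NT bits and TR, and push the error code if the switch was caused
 * by an exception that carries one.
 */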
  2730. static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
  2731. u16 tss_selector, int idt_index, int reason,
  2732. bool has_error_code, u32 error_code)
  2733. {
  2734. const struct x86_emulate_ops *ops = ctxt->ops;
  2735. struct desc_struct curr_tss_desc, next_tss_desc;
  2736. int ret;
  2737. u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
  2738. ulong old_tss_base =
  2739. ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
  2740. u32 desc_limit;
  2741. ulong desc_addr, dr7;
  2742. /* FIXME: old_tss_base == ~0 ? */
  2743. ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
  2744. if (ret != X86EMUL_CONTINUE)
  2745. return ret;
  2746. ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
  2747. if (ret != X86EMUL_CONTINUE)
  2748. return ret;
  2749. /* FIXME: check that next_tss_desc is tss */
  2750. /*
  2751. * Check privileges. The three cases are task switch caused by...
  2752. *
  2753. * 1. jmp/call/int to task gate: Check against DPL of the task gate
  2754. * 2. Exception/IRQ/iret: No check is performed
  2755. * 3. jmp/call to TSS/task-gate: No check is performed since the
  2756. * hardware checks it before exiting.
  2757. */
  2758. if (reason == TASK_SWITCH_GATE) {
  2759. if (idt_index != -1) {
  2760. /* Software interrupts */
  2761. struct desc_struct task_gate_desc;
  2762. int dpl;
  2763. ret = read_interrupt_descriptor(ctxt, idt_index,
  2764. &task_gate_desc);
  2765. if (ret != X86EMUL_CONTINUE)
  2766. return ret;
  2767. dpl = task_gate_desc.dpl;
  2768. if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
  2769. return emulate_gp(ctxt, (idt_index << 3) | 0x2);
  2770. }
  2771. }
  2772. desc_limit = desc_limit_scaled(&next_tss_desc);
  2773. if (!next_tss_desc.p ||
  2774. ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
  2775. desc_limit < 0x2b)) {
  2776. return emulate_ts(ctxt, tss_selector & 0xfffc);
  2777. }
  2778. if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
  2779. curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
  2780. write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
  2781. }
  2782. if (reason == TASK_SWITCH_IRET)
  2783. ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2784. /* Set the back link to the previous task only if the NT bit is set in
2785. EFLAGS; note that old_tss_sel is not used after this point. */
  2786. if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
  2787. old_tss_sel = 0xffff;
  2788. if (next_tss_desc.type & 8)
  2789. ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
  2790. old_tss_base, &next_tss_desc);
  2791. else
  2792. ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
  2793. old_tss_base, &next_tss_desc);
  2794. if (ret != X86EMUL_CONTINUE)
  2795. return ret;
  2796. if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
  2797. ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
  2798. if (reason != TASK_SWITCH_IRET) {
  2799. next_tss_desc.type |= (1 << 1); /* set busy flag */
  2800. write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
  2801. }
  2802. ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
  2803. ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
  2804. if (has_error_code) {
  2805. ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
  2806. ctxt->lock_prefix = 0;
  2807. ctxt->src.val = (unsigned long) error_code;
  2808. ret = em_push(ctxt);
  2809. }
  2810. ops->get_dr(ctxt, 7, &dr7);
  2811. ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
  2812. return ret;
  2813. }
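/*
 * Public entry point for task-switch emulation: invalidate the cached
 * register file, run the switch, and write the registers back only if it
 * completed successfully.
 */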
  2814. int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
  2815. u16 tss_selector, int idt_index, int reason,
  2816. bool has_error_code, u32 error_code)
  2817. {
  2818. int rc;
  2819. invalidate_registers(ctxt);
  2820. ctxt->_eip = ctxt->eip;
  2821. ctxt->dst.type = OP_NONE;
  2822. rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
  2823. has_error_code, error_code);
  2824. if (rc == X86EMUL_CONTINUE) {
  2825. ctxt->eip = ctxt->_eip;
  2826. writeback_registers(ctxt);
  2827. }
  2828. return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
  2829. }
  2830. static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
  2831. struct operand *op)
  2832. {
  2833. int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
  2834. register_address_increment(ctxt, reg, df * op->bytes);
  2835. op->addr.mem.ea = register_address(ctxt, reg);
  2836. }
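/*
 * DAS (decimal adjust AL after subtraction). This follows the SDM
 * algorithm: subtract 6 if the low nibble is above 9 or AF is set, then
 * subtract 0x60 if the original AL was above 0x99 or CF was set, updating
 * AF and CF accordingly.
 */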
  2837. static int em_das(struct x86_emulate_ctxt *ctxt)
  2838. {
  2839. u8 al, old_al;
  2840. bool af, cf, old_cf;
  2841. cf = ctxt->eflags & X86_EFLAGS_CF;
  2842. al = ctxt->dst.val;
  2843. old_al = al;
  2844. old_cf = cf;
  2845. cf = false;
  2846. af = ctxt->eflags & X86_EFLAGS_AF;
  2847. if ((al & 0x0f) > 9 || af) {
  2848. al -= 6;
  2849. cf = old_cf | (al >= 250);
  2850. af = true;
  2851. } else {
  2852. af = false;
  2853. }
  2854. if (old_al > 0x99 || old_cf) {
  2855. al -= 0x60;
  2856. cf = true;
  2857. }
  2858. ctxt->dst.val = al;
  2859. /* Set PF, ZF, SF */
  2860. ctxt->src.type = OP_IMM;
  2861. ctxt->src.val = 0;
  2862. ctxt->src.bytes = 1;
  2863. fastop(ctxt, em_or);
  2864. ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
  2865. if (cf)
  2866. ctxt->eflags |= X86_EFLAGS_CF;
  2867. if (af)
  2868. ctxt->eflags |= X86_EFLAGS_AF;
  2869. return X86EMUL_CONTINUE;
  2870. }
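/*
 * AAM/AAD (ASCII adjust after multiply / before divide). AAM splits AL
 * into AH = AL / imm8 and AL = AL % imm8 (imm8 is normally 10); AAD folds
 * AH back in as AL = (AL + AH * imm8) & 0xff and clears AH. Both then
 * update PF/ZF/SF via the OR fastop trick below.
 */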
  2871. static int em_aam(struct x86_emulate_ctxt *ctxt)
  2872. {
  2873. u8 al, ah;
  2874. if (ctxt->src.val == 0)
  2875. return emulate_de(ctxt);
  2876. al = ctxt->dst.val & 0xff;
  2877. ah = al / ctxt->src.val;
  2878. al %= ctxt->src.val;
  2879. ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
  2880. /* Set PF, ZF, SF */
  2881. ctxt->src.type = OP_IMM;
  2882. ctxt->src.val = 0;
  2883. ctxt->src.bytes = 1;
  2884. fastop(ctxt, em_or);
  2885. return X86EMUL_CONTINUE;
  2886. }
  2887. static int em_aad(struct x86_emulate_ctxt *ctxt)
  2888. {
  2889. u8 al = ctxt->dst.val & 0xff;
  2890. u8 ah = (ctxt->dst.val >> 8) & 0xff;
  2891. al = (al + (ah * ctxt->src.val)) & 0xff;
  2892. ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
  2893. /* Set PF, ZF, SF */
  2894. ctxt->src.type = OP_IMM;
  2895. ctxt->src.val = 0;
  2896. ctxt->src.bytes = 1;
  2897. fastop(ctxt, em_or);
  2898. return X86EMUL_CONTINUE;
  2899. }
  2900. static int em_call(struct x86_emulate_ctxt *ctxt)
  2901. {
  2902. int rc;
  2903. long rel = ctxt->src.val;
  2904. ctxt->src.val = (unsigned long)ctxt->_eip;
  2905. rc = jmp_rel(ctxt, rel);
  2906. if (rc != X86EMUL_CONTINUE)
  2907. return rc;
  2908. return em_push(ctxt);
  2909. }
  2910. static int em_call_far(struct x86_emulate_ctxt *ctxt)
  2911. {
  2912. u16 sel, old_cs;
  2913. ulong old_eip;
  2914. int rc;
  2915. struct desc_struct old_desc, new_desc;
  2916. const struct x86_emulate_ops *ops = ctxt->ops;
  2917. int cpl = ctxt->ops->cpl(ctxt);
  2918. enum x86emul_mode prev_mode = ctxt->mode;
  2919. old_eip = ctxt->_eip;
  2920. ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
  2921. memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
  2922. rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
  2923. X86_TRANSFER_CALL_JMP, &new_desc);
  2924. if (rc != X86EMUL_CONTINUE)
  2925. return rc;
  2926. rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
  2927. if (rc != X86EMUL_CONTINUE)
  2928. goto fail;
  2929. ctxt->src.val = old_cs;
  2930. rc = em_push(ctxt);
  2931. if (rc != X86EMUL_CONTINUE)
  2932. goto fail;
  2933. ctxt->src.val = old_eip;
  2934. rc = em_push(ctxt);
2935. /* If we failed, we tainted the memory, but at the very least we should
2936. restore cs. */
  2937. if (rc != X86EMUL_CONTINUE) {
  2938. pr_warn_once("faulting far call emulation tainted memory\n");
  2939. goto fail;
  2940. }
  2941. return rc;
  2942. fail:
  2943. ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
  2944. ctxt->mode = prev_mode;
  2945. return rc;
  2946. }
  2947. static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
  2948. {
  2949. int rc;
  2950. unsigned long eip;
  2951. rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
  2952. if (rc != X86EMUL_CONTINUE)
  2953. return rc;
  2954. rc = assign_eip_near(ctxt, eip);
  2955. if (rc != X86EMUL_CONTINUE)
  2956. return rc;
  2957. rsp_increment(ctxt, ctxt->src.val);
  2958. return X86EMUL_CONTINUE;
  2959. }
  2960. static int em_xchg(struct x86_emulate_ctxt *ctxt)
  2961. {
  2962. /* Write back the register source. */
  2963. ctxt->src.val = ctxt->dst.val;
  2964. write_register_operand(&ctxt->src);
  2965. /* Write back the memory destination with implicit LOCK prefix. */
  2966. ctxt->dst.val = ctxt->src.orig_val;
  2967. ctxt->lock_prefix = 1;
  2968. return X86EMUL_CONTINUE;
  2969. }
  2970. static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
  2971. {
  2972. ctxt->dst.val = ctxt->src2.val;
  2973. return fastop(ctxt, em_imul);
  2974. }
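/*
 * CWD/CDQ/CQO: sign-extend the accumulator into DX/EDX/RDX. The expression
 * below computes the sign mask: shifting out all but the sign bit yields
 * 0 or 1, and ~(x - 1) turns that into 0 or all-ones.
 */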
  2975. static int em_cwd(struct x86_emulate_ctxt *ctxt)
  2976. {
  2977. ctxt->dst.type = OP_REG;
  2978. ctxt->dst.bytes = ctxt->src.bytes;
  2979. ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
  2980. ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
  2981. return X86EMUL_CONTINUE;
  2982. }
  2983. static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
  2984. {
  2985. u64 tsc = 0;
  2986. ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
  2987. *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
  2988. *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
  2989. return X86EMUL_CONTINUE;
  2990. }
  2991. static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
  2992. {
  2993. u64 pmc;
  2994. if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
  2995. return emulate_gp(ctxt, 0);
  2996. *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
  2997. *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
  2998. return X86EMUL_CONTINUE;
  2999. }
  3000. static int em_mov(struct x86_emulate_ctxt *ctxt)
  3001. {
  3002. memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
  3003. return X86EMUL_CONTINUE;
  3004. }
  3005. #define FFL(x) bit(X86_FEATURE_##x)
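/* FFL() yields the mask of a guest CPUID feature bit within its 32-bit leaf register. */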
  3006. static int em_movbe(struct x86_emulate_ctxt *ctxt)
  3007. {
  3008. u32 ebx, ecx, edx, eax = 1;
  3009. u16 tmp;
  3010. /*
3011. * Check that MOVBE is set in the guest-visible CPUID leaf.
  3012. */
  3013. ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
  3014. if (!(ecx & FFL(MOVBE)))
  3015. return emulate_ud(ctxt);
  3016. switch (ctxt->op_bytes) {
  3017. case 2:
  3018. /*
  3019. * From MOVBE definition: "...When the operand size is 16 bits,
  3020. * the upper word of the destination register remains unchanged
  3021. * ..."
  3022. *
3023. * Casting either ->valptr or ->val to u16 breaks the strict aliasing
3024. * rules, so we have to do the operation almost by hand.
  3025. */
  3026. tmp = (u16)ctxt->src.val;
  3027. ctxt->dst.val &= ~0xffffUL;
  3028. ctxt->dst.val |= (unsigned long)swab16(tmp);
  3029. break;
  3030. case 4:
  3031. ctxt->dst.val = swab32((u32)ctxt->src.val);
  3032. break;
  3033. case 8:
  3034. ctxt->dst.val = swab64(ctxt->src.val);
  3035. break;
  3036. default:
  3037. BUG();
  3038. }
  3039. return X86EMUL_CONTINUE;
  3040. }
  3041. static int em_cr_write(struct x86_emulate_ctxt *ctxt)
  3042. {
  3043. if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
  3044. return emulate_gp(ctxt, 0);
  3045. /* Disable writeback. */
  3046. ctxt->dst.type = OP_NONE;
  3047. return X86EMUL_CONTINUE;
  3048. }
  3049. static int em_dr_write(struct x86_emulate_ctxt *ctxt)
  3050. {
  3051. unsigned long val;
  3052. if (ctxt->mode == X86EMUL_MODE_PROT64)
  3053. val = ctxt->src.val & ~0ULL;
  3054. else
  3055. val = ctxt->src.val & ~0U;
  3056. /* #UD condition is already handled. */
  3057. if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
  3058. return emulate_gp(ctxt, 0);
  3059. /* Disable writeback. */
  3060. ctxt->dst.type = OP_NONE;
  3061. return X86EMUL_CONTINUE;
  3062. }
  3063. static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
  3064. {
  3065. u64 msr_data;
  3066. msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
  3067. | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
  3068. if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
  3069. return emulate_gp(ctxt, 0);
  3070. return X86EMUL_CONTINUE;
  3071. }
  3072. static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
  3073. {
  3074. u64 msr_data;
  3075. if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
  3076. return emulate_gp(ctxt, 0);
  3077. *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
  3078. *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
  3079. return X86EMUL_CONTINUE;
  3080. }
  3081. static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
  3082. {
  3083. if (ctxt->modrm_reg > VCPU_SREG_GS)
  3084. return emulate_ud(ctxt);
  3085. ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
  3086. if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
  3087. ctxt->dst.bytes = 2;
  3088. return X86EMUL_CONTINUE;
  3089. }
  3090. static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
  3091. {
  3092. u16 sel = ctxt->src.val;
  3093. if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
  3094. return emulate_ud(ctxt);
  3095. if (ctxt->modrm_reg == VCPU_SREG_SS)
  3096. ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
  3097. /* Disable writeback. */
  3098. ctxt->dst.type = OP_NONE;
  3099. return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
  3100. }
  3101. static int em_lldt(struct x86_emulate_ctxt *ctxt)
  3102. {
  3103. u16 sel = ctxt->src.val;
  3104. /* Disable writeback. */
  3105. ctxt->dst.type = OP_NONE;
  3106. return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
  3107. }
  3108. static int em_ltr(struct x86_emulate_ctxt *ctxt)
  3109. {
  3110. u16 sel = ctxt->src.val;
  3111. /* Disable writeback. */
  3112. ctxt->dst.type = OP_NONE;
  3113. return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
  3114. }
  3115. static int em_invlpg(struct x86_emulate_ctxt *ctxt)
  3116. {
  3117. int rc;
  3118. ulong linear;
  3119. rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
  3120. if (rc == X86EMUL_CONTINUE)
  3121. ctxt->ops->invlpg(ctxt, linear);
  3122. /* Disable writeback. */
  3123. ctxt->dst.type = OP_NONE;
  3124. return X86EMUL_CONTINUE;
  3125. }
  3126. static int em_clts(struct x86_emulate_ctxt *ctxt)
  3127. {
  3128. ulong cr0;
  3129. cr0 = ctxt->ops->get_cr(ctxt, 0);
  3130. cr0 &= ~X86_CR0_TS;
  3131. ctxt->ops->set_cr(ctxt, 0, cr0);
  3132. return X86EMUL_CONTINUE;
  3133. }
  3134. static int em_hypercall(struct x86_emulate_ctxt *ctxt)
  3135. {
  3136. int rc = ctxt->ops->fix_hypercall(ctxt);
  3137. if (rc != X86EMUL_CONTINUE)
  3138. return rc;
  3139. /* Let the processor re-execute the fixed hypercall */
  3140. ctxt->_eip = ctxt->eip;
  3141. /* Disable writeback. */
  3142. ctxt->dst.type = OP_NONE;
  3143. return X86EMUL_CONTINUE;
  3144. }
  3145. static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
  3146. void (*get)(struct x86_emulate_ctxt *ctxt,
  3147. struct desc_ptr *ptr))
  3148. {
  3149. struct desc_ptr desc_ptr;
  3150. if (ctxt->mode == X86EMUL_MODE_PROT64)
  3151. ctxt->op_bytes = 8;
  3152. get(ctxt, &desc_ptr);
  3153. if (ctxt->op_bytes == 2) {
  3154. ctxt->op_bytes = 4;
  3155. desc_ptr.address &= 0x00ffffff;
  3156. }
  3157. /* Disable writeback. */
  3158. ctxt->dst.type = OP_NONE;
  3159. return segmented_write(ctxt, ctxt->dst.addr.mem,
  3160. &desc_ptr, 2 + ctxt->op_bytes);
  3161. }
  3162. static int em_sgdt(struct x86_emulate_ctxt *ctxt)
  3163. {
  3164. return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
  3165. }
  3166. static int em_sidt(struct x86_emulate_ctxt *ctxt)
  3167. {
  3168. return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
  3169. }
  3170. static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
  3171. {
  3172. struct desc_ptr desc_ptr;
  3173. int rc;
  3174. if (ctxt->mode == X86EMUL_MODE_PROT64)
  3175. ctxt->op_bytes = 8;
  3176. rc = read_descriptor(ctxt, ctxt->src.addr.mem,
  3177. &desc_ptr.size, &desc_ptr.address,
  3178. ctxt->op_bytes);
  3179. if (rc != X86EMUL_CONTINUE)
  3180. return rc;
  3181. if (ctxt->mode == X86EMUL_MODE_PROT64 &&
  3182. is_noncanonical_address(desc_ptr.address))
  3183. return emulate_gp(ctxt, 0);
  3184. if (lgdt)
  3185. ctxt->ops->set_gdt(ctxt, &desc_ptr);
  3186. else
  3187. ctxt->ops->set_idt(ctxt, &desc_ptr);
  3188. /* Disable writeback. */
  3189. ctxt->dst.type = OP_NONE;
  3190. return X86EMUL_CONTINUE;
  3191. }
  3192. static int em_lgdt(struct x86_emulate_ctxt *ctxt)
  3193. {
  3194. return em_lgdt_lidt(ctxt, true);
  3195. }
  3196. static int em_lidt(struct x86_emulate_ctxt *ctxt)
  3197. {
  3198. return em_lgdt_lidt(ctxt, false);
  3199. }
  3200. static int em_smsw(struct x86_emulate_ctxt *ctxt)
  3201. {
  3202. if (ctxt->dst.type == OP_MEM)
  3203. ctxt->dst.bytes = 2;
  3204. ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
  3205. return X86EMUL_CONTINUE;
  3206. }
  3207. static int em_lmsw(struct x86_emulate_ctxt *ctxt)
  3208. {
  3209. ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
  3210. | (ctxt->src.val & 0x0f));
  3211. ctxt->dst.type = OP_NONE;
  3212. return X86EMUL_CONTINUE;
  3213. }
  3214. static int em_loop(struct x86_emulate_ctxt *ctxt)
  3215. {
  3216. int rc = X86EMUL_CONTINUE;
  3217. register_address_increment(ctxt, VCPU_REGS_RCX, -1);
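/*
 * 0xe0 = loopne, 0xe1 = loope, 0xe2 = loop; XOR-ing the opcode with 5 maps
 * the first two onto the "ne"/"e" condition codes checked by test_cc().
 */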
  3218. if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
  3219. (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
  3220. rc = jmp_rel(ctxt, ctxt->src.val);
  3221. return rc;
  3222. }
  3223. static int em_jcxz(struct x86_emulate_ctxt *ctxt)
  3224. {
  3225. int rc = X86EMUL_CONTINUE;
  3226. if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
  3227. rc = jmp_rel(ctxt, ctxt->src.val);
  3228. return rc;
  3229. }
  3230. static int em_in(struct x86_emulate_ctxt *ctxt)
  3231. {
  3232. if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
  3233. &ctxt->dst.val))
  3234. return X86EMUL_IO_NEEDED;
  3235. return X86EMUL_CONTINUE;
  3236. }
  3237. static int em_out(struct x86_emulate_ctxt *ctxt)
  3238. {
  3239. ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
  3240. &ctxt->src.val, 1);
  3241. /* Disable writeback. */
  3242. ctxt->dst.type = OP_NONE;
  3243. return X86EMUL_CONTINUE;
  3244. }
  3245. static int em_cli(struct x86_emulate_ctxt *ctxt)
  3246. {
  3247. if (emulator_bad_iopl(ctxt))
  3248. return emulate_gp(ctxt, 0);
  3249. ctxt->eflags &= ~X86_EFLAGS_IF;
  3250. return X86EMUL_CONTINUE;
  3251. }
  3252. static int em_sti(struct x86_emulate_ctxt *ctxt)
  3253. {
  3254. if (emulator_bad_iopl(ctxt))
  3255. return emulate_gp(ctxt, 0);
  3256. ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
  3257. ctxt->eflags |= X86_EFLAGS_IF;
  3258. return X86EMUL_CONTINUE;
  3259. }
  3260. static int em_cpuid(struct x86_emulate_ctxt *ctxt)
  3261. {
  3262. u32 eax, ebx, ecx, edx;
  3263. eax = reg_read(ctxt, VCPU_REGS_RAX);
  3264. ecx = reg_read(ctxt, VCPU_REGS_RCX);
  3265. ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
  3266. *reg_write(ctxt, VCPU_REGS_RAX) = eax;
  3267. *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
  3268. *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
  3269. *reg_write(ctxt, VCPU_REGS_RDX) = edx;
  3270. return X86EMUL_CONTINUE;
  3271. }
  3272. static int em_sahf(struct x86_emulate_ctxt *ctxt)
  3273. {
  3274. u32 flags;
  3275. flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
  3276. X86_EFLAGS_SF;
  3277. flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
  3278. ctxt->eflags &= ~0xffUL;
  3279. ctxt->eflags |= flags | X86_EFLAGS_FIXED;
  3280. return X86EMUL_CONTINUE;
  3281. }
  3282. static int em_lahf(struct x86_emulate_ctxt *ctxt)
  3283. {
  3284. *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
  3285. *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
  3286. return X86EMUL_CONTINUE;
  3287. }
  3288. static int em_bswap(struct x86_emulate_ctxt *ctxt)
  3289. {
  3290. switch (ctxt->op_bytes) {
  3291. #ifdef CONFIG_X86_64
  3292. case 8:
  3293. asm("bswap %0" : "+r"(ctxt->dst.val));
  3294. break;
  3295. #endif
  3296. default:
  3297. asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
  3298. break;
  3299. }
  3300. return X86EMUL_CONTINUE;
  3301. }
  3302. static int em_clflush(struct x86_emulate_ctxt *ctxt)
  3303. {
  3304. /* emulating clflush regardless of cpuid */
  3305. return X86EMUL_CONTINUE;
  3306. }
  3307. static int em_movsxd(struct x86_emulate_ctxt *ctxt)
  3308. {
  3309. ctxt->dst.val = (s32) ctxt->src.val;
  3310. return X86EMUL_CONTINUE;
  3311. }
  3312. static int check_fxsr(struct x86_emulate_ctxt *ctxt)
  3313. {
  3314. u32 eax = 1, ebx, ecx = 0, edx;
  3315. ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
  3316. if (!(edx & FFL(FXSR)))
  3317. return emulate_ud(ctxt);
  3318. if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
  3319. return emulate_nm(ctxt);
  3320. /*
3321. * Rather than work around the lack of fxsave64/fxrstor64 on old
3322. * compilers, don't emulate a case that should never be hit.
  3323. */
  3324. if (ctxt->mode >= X86EMUL_MODE_PROT64)
  3325. return X86EMUL_UNHANDLEABLE;
  3326. return X86EMUL_CONTINUE;
  3327. }
  3328. /*
  3329. * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
  3330. * 1) 16 bit mode
  3331. * 2) 32 bit mode
3332. * - like (1), but FIP and FDP are only 16 bit. At least Intel CPUs
3333. * preserve whole 32-bit values, though, so (1) and (2) are the same with
3334. * respect to save and restore.
  3335. * 3) 64-bit mode with REX.W prefix
  3336. * - like (2), but XMM 8-15 are being saved and restored
  3337. * 4) 64-bit mode without REX.W prefix
  3338. * - like (3), but FIP and FDP are 64 bit
  3339. *
  3340. * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
  3341. * desired result. (4) is not emulated.
  3342. *
  3343. * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
  3344. * and FPU DS) should match.
  3345. */
  3346. static int em_fxsave(struct x86_emulate_ctxt *ctxt)
  3347. {
  3348. struct fxregs_state fx_state;
  3349. size_t size;
  3350. int rc;
  3351. rc = check_fxsr(ctxt);
  3352. if (rc != X86EMUL_CONTINUE)
  3353. return rc;
  3354. ctxt->ops->get_fpu(ctxt);
  3355. rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
  3356. ctxt->ops->put_fpu(ctxt);
  3357. if (rc != X86EMUL_CONTINUE)
  3358. return rc;
  3359. if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
  3360. size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
  3361. else
  3362. size = offsetof(struct fxregs_state, xmm_space[0]);
  3363. return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
  3364. }
  3365. static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
  3366. struct fxregs_state *new)
  3367. {
  3368. int rc = X86EMUL_CONTINUE;
  3369. struct fxregs_state old;
  3370. rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
  3371. if (rc != X86EMUL_CONTINUE)
  3372. return rc;
  3373. /*
3374. * A 64-bit host will restore XMM 8-15, which is not correct for non-64-bit
3375. * guests. Load the current values in order to preserve the 64-bit XMMs
3376. * after fxrstor.
  3377. */
  3378. #ifdef CONFIG_X86_64
  3379. /* XXX: accessing XMM 8-15 very awkwardly */
  3380. memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
  3381. #endif
  3382. /*
  3383. * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
  3384. * does save and restore MXCSR.
  3385. */
  3386. if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
  3387. memcpy(new->xmm_space, old.xmm_space, 8 * 16);
  3388. return rc;
  3389. }
  3390. static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
  3391. {
  3392. struct fxregs_state fx_state;
  3393. int rc;
  3394. rc = check_fxsr(ctxt);
  3395. if (rc != X86EMUL_CONTINUE)
  3396. return rc;
  3397. rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
  3398. if (rc != X86EMUL_CONTINUE)
  3399. return rc;
  3400. if (fx_state.mxcsr >> 16)
  3401. return emulate_gp(ctxt, 0);
  3402. ctxt->ops->get_fpu(ctxt);
  3403. if (ctxt->mode < X86EMUL_MODE_PROT64)
  3404. rc = fxrstor_fixup(ctxt, &fx_state);
  3405. if (rc == X86EMUL_CONTINUE)
  3406. rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
  3407. ctxt->ops->put_fpu(ctxt);
  3408. return rc;
  3409. }
  3410. static bool valid_cr(int nr)
  3411. {
  3412. switch (nr) {
  3413. case 0:
  3414. case 2 ... 4:
  3415. case 8:
  3416. return true;
  3417. default:
  3418. return false;
  3419. }
  3420. }
  3421. static int check_cr_read(struct x86_emulate_ctxt *ctxt)
  3422. {
  3423. if (!valid_cr(ctxt->modrm_reg))
  3424. return emulate_ud(ctxt);
  3425. return X86EMUL_CONTINUE;
  3426. }
  3427. static int check_cr_write(struct x86_emulate_ctxt *ctxt)
  3428. {
  3429. u64 new_val = ctxt->src.val64;
  3430. int cr = ctxt->modrm_reg;
  3431. u64 efer = 0;
  3432. static u64 cr_reserved_bits[] = {
  3433. 0xffffffff00000000ULL,
  3434. 0, 0, 0, /* CR3 checked later */
  3435. CR4_RESERVED_BITS,
  3436. 0, 0, 0,
  3437. CR8_RESERVED_BITS,
  3438. };
  3439. if (!valid_cr(cr))
  3440. return emulate_ud(ctxt);
  3441. if (new_val & cr_reserved_bits[cr])
  3442. return emulate_gp(ctxt, 0);
  3443. switch (cr) {
  3444. case 0: {
  3445. u64 cr4;
  3446. if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
  3447. ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
  3448. return emulate_gp(ctxt, 0);
  3449. cr4 = ctxt->ops->get_cr(ctxt, 4);
  3450. ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
  3451. if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
  3452. !(cr4 & X86_CR4_PAE))
  3453. return emulate_gp(ctxt, 0);
  3454. break;
  3455. }
  3456. case 3: {
  3457. u64 rsvd = 0;
  3458. ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
  3459. if (efer & EFER_LMA)
  3460. rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
  3461. if (new_val & rsvd)
  3462. return emulate_gp(ctxt, 0);
  3463. break;
  3464. }
  3465. case 4: {
  3466. ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
  3467. if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
  3468. return emulate_gp(ctxt, 0);
  3469. break;
  3470. }
  3471. }
  3472. return X86EMUL_CONTINUE;
  3473. }
  3474. static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
  3475. {
  3476. unsigned long dr7;
  3477. ctxt->ops->get_dr(ctxt, 7, &dr7);
3478. /* Check if DR7.GD (general detect enable, bit 13) is set */
  3479. return dr7 & (1 << 13);
  3480. }
  3481. static int check_dr_read(struct x86_emulate_ctxt *ctxt)
  3482. {
  3483. int dr = ctxt->modrm_reg;
  3484. u64 cr4;
  3485. if (dr > 7)
  3486. return emulate_ud(ctxt);
  3487. cr4 = ctxt->ops->get_cr(ctxt, 4);
  3488. if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
  3489. return emulate_ud(ctxt);
  3490. if (check_dr7_gd(ctxt)) {
  3491. ulong dr6;
  3492. ctxt->ops->get_dr(ctxt, 6, &dr6);
  3493. dr6 &= ~15;
  3494. dr6 |= DR6_BD | DR6_RTM;
  3495. ctxt->ops->set_dr(ctxt, 6, dr6);
  3496. return emulate_db(ctxt);
  3497. }
  3498. return X86EMUL_CONTINUE;
  3499. }
  3500. static int check_dr_write(struct x86_emulate_ctxt *ctxt)
  3501. {
  3502. u64 new_val = ctxt->src.val64;
  3503. int dr = ctxt->modrm_reg;
  3504. if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
  3505. return emulate_gp(ctxt, 0);
  3506. return check_dr_read(ctxt);
  3507. }
  3508. static int check_svme(struct x86_emulate_ctxt *ctxt)
  3509. {
  3510. u64 efer;
  3511. ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
  3512. if (!(efer & EFER_SVME))
  3513. return emulate_ud(ctxt);
  3514. return X86EMUL_CONTINUE;
  3515. }
  3516. static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
  3517. {
  3518. u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
  3519. /* Valid physical address? */
  3520. if (rax & 0xffff000000000000ULL)
  3521. return emulate_gp(ctxt, 0);
  3522. return check_svme(ctxt);
  3523. }
  3524. static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
  3525. {
  3526. u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
  3527. if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
  3528. return emulate_ud(ctxt);
  3529. return X86EMUL_CONTINUE;
  3530. }
  3531. static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
  3532. {
  3533. u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
  3534. u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
  3535. if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
  3536. ctxt->ops->check_pmc(ctxt, rcx))
  3537. return emulate_gp(ctxt, 0);
  3538. return X86EMUL_CONTINUE;
  3539. }
  3540. static int check_perm_in(struct x86_emulate_ctxt *ctxt)
  3541. {
  3542. ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
  3543. if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
  3544. return emulate_gp(ctxt, 0);
  3545. return X86EMUL_CONTINUE;
  3546. }
  3547. static int check_perm_out(struct x86_emulate_ctxt *ctxt)
  3548. {
  3549. ctxt->src.bytes = min(ctxt->src.bytes, 4u);
  3550. if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
  3551. return emulate_gp(ctxt, 0);
  3552. return X86EMUL_CONTINUE;
  3553. }
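/*
 * Shorthand used to build the opcode tables below: D() declares an opcode
 * by its decode flags only, I() adds an execute callback, F() a fastop,
 * G()/GD()/E()/GP() redirect through group, group-dual, escape and
 * mandatory-prefix tables, and the *2bv/*IP variants expand into
 * byte+word forms or add intercept/permission checks.
 */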
  3554. #define D(_y) { .flags = (_y) }
  3555. #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
  3556. #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
  3557. .intercept = x86_intercept_##_i, .check_perm = (_p) }
  3558. #define N D(NotImpl)
  3559. #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
  3560. #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
  3561. #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
  3562. #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
  3563. #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
  3564. #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
  3565. #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
  3566. #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
  3567. #define II(_f, _e, _i) \
  3568. { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
  3569. #define IIP(_f, _e, _i, _p) \
  3570. { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
  3571. .intercept = x86_intercept_##_i, .check_perm = (_p) }
  3572. #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
  3573. #define D2bv(_f) D((_f) | ByteOp), D(_f)
  3574. #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
  3575. #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
  3576. #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
  3577. #define I2bvIP(_f, _e, _i, _p) \
  3578. IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
  3579. #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
  3580. F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
  3581. F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
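/*
 * F6ALU() expands to the six classic ALU encodings of an instruction:
 * r/m,reg and reg,r/m and acc,imm, each in ByteOp and word/dword form.
 */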
  3582. static const struct opcode group7_rm0[] = {
  3583. N,
  3584. I(SrcNone | Priv | EmulateOnUD, em_hypercall),
  3585. N, N, N, N, N, N,
  3586. };
  3587. static const struct opcode group7_rm1[] = {
  3588. DI(SrcNone | Priv, monitor),
  3589. DI(SrcNone | Priv, mwait),
  3590. N, N, N, N, N, N,
  3591. };
  3592. static const struct opcode group7_rm3[] = {
  3593. DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
  3594. II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
  3595. DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
  3596. DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
  3597. DIP(SrcNone | Prot | Priv, stgi, check_svme),
  3598. DIP(SrcNone | Prot | Priv, clgi, check_svme),
  3599. DIP(SrcNone | Prot | Priv, skinit, check_svme),
  3600. DIP(SrcNone | Prot | Priv, invlpga, check_svme),
  3601. };
  3602. static const struct opcode group7_rm7[] = {
  3603. N,
  3604. DIP(SrcNone, rdtscp, check_rdtsc),
  3605. N, N, N, N, N, N,
  3606. };
  3607. static const struct opcode group1[] = {
  3608. F(Lock, em_add),
  3609. F(Lock | PageTable, em_or),
  3610. F(Lock, em_adc),
  3611. F(Lock, em_sbb),
  3612. F(Lock | PageTable, em_and),
  3613. F(Lock, em_sub),
  3614. F(Lock, em_xor),
  3615. F(NoWrite, em_cmp),
  3616. };
  3617. static const struct opcode group1A[] = {
  3618. I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
  3619. };
  3620. static const struct opcode group2[] = {
  3621. F(DstMem | ModRM, em_rol),
  3622. F(DstMem | ModRM, em_ror),
  3623. F(DstMem | ModRM, em_rcl),
  3624. F(DstMem | ModRM, em_rcr),
  3625. F(DstMem | ModRM, em_shl),
  3626. F(DstMem | ModRM, em_shr),
3627. F(DstMem | ModRM, em_shl), /* /6 is treated as an alias for shl */
  3628. F(DstMem | ModRM, em_sar),
  3629. };
  3630. static const struct opcode group3[] = {
  3631. F(DstMem | SrcImm | NoWrite, em_test),
  3632. F(DstMem | SrcImm | NoWrite, em_test),
  3633. F(DstMem | SrcNone | Lock, em_not),
  3634. F(DstMem | SrcNone | Lock, em_neg),
  3635. F(DstXacc | Src2Mem, em_mul_ex),
  3636. F(DstXacc | Src2Mem, em_imul_ex),
  3637. F(DstXacc | Src2Mem, em_div_ex),
  3638. F(DstXacc | Src2Mem, em_idiv_ex),
  3639. };
  3640. static const struct opcode group4[] = {
  3641. F(ByteOp | DstMem | SrcNone | Lock, em_inc),
  3642. F(ByteOp | DstMem | SrcNone | Lock, em_dec),
  3643. N, N, N, N, N, N,
  3644. };
  3645. static const struct opcode group5[] = {
  3646. F(DstMem | SrcNone | Lock, em_inc),
  3647. F(DstMem | SrcNone | Lock, em_dec),
  3648. I(SrcMem | NearBranch, em_call_near_abs),
  3649. I(SrcMemFAddr | ImplicitOps, em_call_far),
  3650. I(SrcMem | NearBranch, em_jmp_abs),
  3651. I(SrcMemFAddr | ImplicitOps, em_jmp_far),
  3652. I(SrcMem | Stack, em_push), D(Undefined),
  3653. };
  3654. static const struct opcode group6[] = {
  3655. DI(Prot | DstMem, sldt),
  3656. DI(Prot | DstMem, str),
  3657. II(Prot | Priv | SrcMem16, em_lldt, lldt),
  3658. II(Prot | Priv | SrcMem16, em_ltr, ltr),
  3659. N, N, N, N,
  3660. };
  3661. static const struct group_dual group7 = { {
  3662. II(Mov | DstMem, em_sgdt, sgdt),
  3663. II(Mov | DstMem, em_sidt, sidt),
  3664. II(SrcMem | Priv, em_lgdt, lgdt),
  3665. II(SrcMem | Priv, em_lidt, lidt),
  3666. II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
  3667. II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
  3668. II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
  3669. }, {
  3670. EXT(0, group7_rm0),
  3671. EXT(0, group7_rm1),
  3672. N, EXT(0, group7_rm3),
  3673. II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
  3674. II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
  3675. EXT(0, group7_rm7),
  3676. } };
  3677. static const struct opcode group8[] = {
  3678. N, N, N, N,
  3679. F(DstMem | SrcImmByte | NoWrite, em_bt),
  3680. F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
  3681. F(DstMem | SrcImmByte | Lock, em_btr),
  3682. F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
  3683. };
  3684. static const struct group_dual group9 = { {
  3685. N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
  3686. }, {
  3687. N, N, N, N, N, N, N, N,
  3688. } };
  3689. static const struct opcode group11[] = {
  3690. I(DstMem | SrcImm | Mov | PageTable, em_mov),
  3691. X7(D(Undefined)),
  3692. };
  3693. static const struct gprefix pfx_0f_ae_7 = {
  3694. I(SrcMem | ByteOp, em_clflush), N, N, N,
  3695. };
  3696. static const struct group_dual group15 = { {
  3697. I(ModRM | Aligned16, em_fxsave),
  3698. I(ModRM | Aligned16, em_fxrstor),
  3699. N, N, N, N, N, GP(0, &pfx_0f_ae_7),
  3700. }, {
  3701. N, N, N, N, N, N, N, N,
  3702. } };
  3703. static const struct gprefix pfx_0f_6f_0f_7f = {
  3704. I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
  3705. };
  3706. static const struct instr_dual instr_dual_0f_2b = {
  3707. I(0, em_mov), N
  3708. };
  3709. static const struct gprefix pfx_0f_2b = {
  3710. ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
  3711. };
  3712. static const struct gprefix pfx_0f_28_0f_29 = {
  3713. I(Aligned, em_mov), I(Aligned, em_mov), N, N,
  3714. };
  3715. static const struct gprefix pfx_0f_e7 = {
  3716. N, I(Sse, em_mov), N, N,
  3717. };
  3718. static const struct escape escape_d9 = { {
  3719. N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
  3720. }, {
  3721. /* 0xC0 - 0xC7 */
  3722. N, N, N, N, N, N, N, N,
  3723. /* 0xC8 - 0xCF */
  3724. N, N, N, N, N, N, N, N,
3725. /* 0xD0 - 0xD7 */
  3726. N, N, N, N, N, N, N, N,
  3727. /* 0xD8 - 0xDF */
  3728. N, N, N, N, N, N, N, N,
  3729. /* 0xE0 - 0xE7 */
  3730. N, N, N, N, N, N, N, N,
  3731. /* 0xE8 - 0xEF */
  3732. N, N, N, N, N, N, N, N,
  3733. /* 0xF0 - 0xF7 */
  3734. N, N, N, N, N, N, N, N,
  3735. /* 0xF8 - 0xFF */
  3736. N, N, N, N, N, N, N, N,
  3737. } };
  3738. static const struct escape escape_db = { {
  3739. N, N, N, N, N, N, N, N,
  3740. }, {
  3741. /* 0xC0 - 0xC7 */
  3742. N, N, N, N, N, N, N, N,
  3743. /* 0xC8 - 0xCF */
  3744. N, N, N, N, N, N, N, N,
3745. /* 0xD0 - 0xD7 */
  3746. N, N, N, N, N, N, N, N,
  3747. /* 0xD8 - 0xDF */
  3748. N, N, N, N, N, N, N, N,
  3749. /* 0xE0 - 0xE7 */
  3750. N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
  3751. /* 0xE8 - 0xEF */
  3752. N, N, N, N, N, N, N, N,
  3753. /* 0xF0 - 0xF7 */
  3754. N, N, N, N, N, N, N, N,
  3755. /* 0xF8 - 0xFF */
  3756. N, N, N, N, N, N, N, N,
  3757. } };
  3758. static const struct escape escape_dd = { {
  3759. N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
  3760. }, {
  3761. /* 0xC0 - 0xC7 */
  3762. N, N, N, N, N, N, N, N,
  3763. /* 0xC8 - 0xCF */
  3764. N, N, N, N, N, N, N, N,
3765. /* 0xD0 - 0xD7 */
  3766. N, N, N, N, N, N, N, N,
  3767. /* 0xD8 - 0xDF */
  3768. N, N, N, N, N, N, N, N,
  3769. /* 0xE0 - 0xE7 */
  3770. N, N, N, N, N, N, N, N,
  3771. /* 0xE8 - 0xEF */
  3772. N, N, N, N, N, N, N, N,
  3773. /* 0xF0 - 0xF7 */
  3774. N, N, N, N, N, N, N, N,
  3775. /* 0xF8 - 0xFF */
  3776. N, N, N, N, N, N, N, N,
  3777. } };
  3778. static const struct instr_dual instr_dual_0f_c3 = {
  3779. I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
  3780. };
  3781. static const struct mode_dual mode_dual_63 = {
  3782. N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
  3783. };
  3784. static const struct opcode opcode_table[256] = {
  3785. /* 0x00 - 0x07 */
  3786. F6ALU(Lock, em_add),
  3787. I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
  3788. I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
  3789. /* 0x08 - 0x0F */
  3790. F6ALU(Lock | PageTable, em_or),
  3791. I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
  3792. N,
  3793. /* 0x10 - 0x17 */
  3794. F6ALU(Lock, em_adc),
  3795. I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
  3796. I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
  3797. /* 0x18 - 0x1F */
  3798. F6ALU(Lock, em_sbb),
  3799. I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
  3800. I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
  3801. /* 0x20 - 0x27 */
  3802. F6ALU(Lock | PageTable, em_and), N, N,
  3803. /* 0x28 - 0x2F */
  3804. F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
  3805. /* 0x30 - 0x37 */
  3806. F6ALU(Lock, em_xor), N, N,
  3807. /* 0x38 - 0x3F */
  3808. F6ALU(NoWrite, em_cmp), N, N,
  3809. /* 0x40 - 0x4F */
  3810. X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
  3811. /* 0x50 - 0x57 */
  3812. X8(I(SrcReg | Stack, em_push)),
  3813. /* 0x58 - 0x5F */
  3814. X8(I(DstReg | Stack, em_pop)),
  3815. /* 0x60 - 0x67 */
  3816. I(ImplicitOps | Stack | No64, em_pusha),
  3817. I(ImplicitOps | Stack | No64, em_popa),
  3818. N, MD(ModRM, &mode_dual_63),
  3819. N, N, N, N,
  3820. /* 0x68 - 0x6F */
  3821. I(SrcImm | Mov | Stack, em_push),
  3822. I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
  3823. I(SrcImmByte | Mov | Stack, em_push),
  3824. I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
  3825. I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
  3826. I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
  3827. /* 0x70 - 0x7F */
  3828. X16(D(SrcImmByte | NearBranch)),
  3829. /* 0x80 - 0x87 */
  3830. G(ByteOp | DstMem | SrcImm, group1),
  3831. G(DstMem | SrcImm, group1),
  3832. G(ByteOp | DstMem | SrcImm | No64, group1),
  3833. G(DstMem | SrcImmByte, group1),
  3834. F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
  3835. I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
  3836. /* 0x88 - 0x8F */
  3837. I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
  3838. I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
  3839. I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
  3840. D(ModRM | SrcMem | NoAccess | DstReg),
  3841. I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
  3842. G(0, group1A),
  3843. /* 0x90 - 0x97 */
  3844. DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
  3845. /* 0x98 - 0x9F */
  3846. D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
  3847. I(SrcImmFAddr | No64, em_call_far), N,
  3848. II(ImplicitOps | Stack, em_pushf, pushf),
  3849. II(ImplicitOps | Stack, em_popf, popf),
  3850. I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
  3851. /* 0xA0 - 0xA7 */
  3852. I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
  3853. I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
  3854. I2bv(SrcSI | DstDI | Mov | String, em_mov),
  3855. F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
  3856. /* 0xA8 - 0xAF */
  3857. F2bv(DstAcc | SrcImm | NoWrite, em_test),
  3858. I2bv(SrcAcc | DstDI | Mov | String, em_mov),
  3859. I2bv(SrcSI | DstAcc | Mov | String, em_mov),
  3860. F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
  3861. /* 0xB0 - 0xB7 */
  3862. X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
  3863. /* 0xB8 - 0xBF */
  3864. X8(I(DstReg | SrcImm64 | Mov, em_mov)),
  3865. /* 0xC0 - 0xC7 */
  3866. G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
  3867. I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
  3868. I(ImplicitOps | NearBranch, em_ret),
  3869. I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
  3870. I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
  3871. G(ByteOp, group11), G(0, group11),
  3872. /* 0xC8 - 0xCF */
  3873. I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
  3874. I(ImplicitOps | SrcImmU16, em_ret_far_imm),
  3875. I(ImplicitOps, em_ret_far),
  3876. D(ImplicitOps), DI(SrcImmByte, intn),
  3877. D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
  3878. /* 0xD0 - 0xD7 */
  3879. G(Src2One | ByteOp, group2), G(Src2One, group2),
  3880. G(Src2CL | ByteOp, group2), G(Src2CL, group2),
  3881. I(DstAcc | SrcImmUByte | No64, em_aam),
  3882. I(DstAcc | SrcImmUByte | No64, em_aad),
  3883. F(DstAcc | ByteOp | No64, em_salc),
  3884. I(DstAcc | SrcXLat | ByteOp, em_mov),
  3885. /* 0xD8 - 0xDF */
  3886. N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
  3887. /* 0xE0 - 0xE7 */
  3888. X3(I(SrcImmByte | NearBranch, em_loop)),
  3889. I(SrcImmByte | NearBranch, em_jcxz),
  3890. I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
  3891. I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
  3892. /* 0xE8 - 0xEF */
  3893. I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
  3894. I(SrcImmFAddr | No64, em_jmp_far),
  3895. D(SrcImmByte | ImplicitOps | NearBranch),
  3896. I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
  3897. I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
  3898. /* 0xF0 - 0xF7 */
  3899. N, DI(ImplicitOps, icebp), N, N,
  3900. DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
  3901. G(ByteOp, group3), G(0, group3),
  3902. /* 0xF8 - 0xFF */
  3903. D(ImplicitOps), D(ImplicitOps),
  3904. I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
  3905. D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
  3906. };
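/* Opcodes reached through the 0x0f escape byte. */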
  3907. static const struct opcode twobyte_table[256] = {
  3908. /* 0x00 - 0x0F */
  3909. G(0, group6), GD(0, &group7), N, N,
  3910. N, I(ImplicitOps | EmulateOnUD, em_syscall),
  3911. II(ImplicitOps | Priv, em_clts, clts), N,
  3912. DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
  3913. N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
  3914. /* 0x10 - 0x1F */
  3915. N, N, N, N, N, N, N, N,
  3916. D(ImplicitOps | ModRM | SrcMem | NoAccess),
  3917. N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
  3918. /* 0x20 - 0x2F */
  3919. DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
  3920. DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
  3921. IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
  3922. check_cr_write),
  3923. IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
  3924. check_dr_write),
  3925. N, N, N, N,
  3926. GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * Insns below are selected by the prefix, which is indexed by the third
 * opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef I6ALU
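
/*
 * Immediate operands are at most 32 bits wide: even with a 64-bit
 * operand size the encoded immediate is 4 bytes (and sign-extended by
 * the consumer), so cap the size at 4 here.
 */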
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}
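
/*
 * Fetch an immediate of 'size' bytes from the instruction stream into
 * 'op'.  The fetch is always sign-extended; when 'sign_extension' is
 * false the value is masked back down to its natural width afterwards.
 */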
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}
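
/*
 * Decode a single operand according to the OpXXX selector taken from
 * the opcode flags.  Memory-like operands funnel through mem_common so
 * they all share ctxt->memop, which the earlier ModRM/absolute decode
 * filled in.
 */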
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}
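
/*
 * Main decode entry point: consume legacy/REX prefixes, look up the
 * opcode (walking the group/prefix/escape/dual tables as needed), then
 * decode ModRM/SIB and the up-to-three operands selected by the opcode
 * flags.
 */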
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */
		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
		     (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
		      No16))) {
		/*
		 * These are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);

	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
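
/*
 * Report whether the decoded instruction is flagged (PageTable) as one
 * that may write to guest page tables.
 */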
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies for REPE
	 * and REPNE. Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and, if so, test the corresponding
	 * termination condition:
	 *	- if REPE/REPZ and ZF = 0 then done
	 *	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}
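
/*
 * Execute fwait under asm_safe so that a pending x87 exception in the
 * guest FPU state is surfaced now and re-injected as #MF, rather than
 * faulting later when MMX operands are accessed.
 */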
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->ops->get_fpu(ctxt);
	rc = asm_safe("fwait");
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}
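
/*
 * Dispatch to a fastop stub: the per-size variants are laid out
 * FASTOP_SIZE bytes apart, so the byte/word/long/quad stub is selected
 * by offsetting 'fop'.  Guest arithmetic flags are loaded around the
 * call and the resulting flags are folded back into ctxt->eflags; a
 * NULL 'fop' on return signals a #DE raised by the stub.
 */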
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	register void *__sp asm(_ASM_SP);
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop), "+r"(__sp)
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
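
/*
 * Reset the per-instruction decode state: everything between
 * ctxt->rip_relative and ctxt->modrm is cleared in one memset (the
 * memset relies on those fields being laid out contiguously in
 * struct x86_emulate_ctxt), and the I/O and memory read-ahead caches
 * are rewound.
 */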
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}
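
/*
 * Execute a previously decoded instruction: run the mode/privilege and
 * intercept checks, read the memory operands, dispatch to the handler
 * (fastop stub, em_* callback or the opcode switches below), write the
 * results back and finally advance RIP, restarting instead of retiring
 * when a REP-prefixed string instruction still has iterations left.
 */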
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;
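
	/*
	 * Execution phase: either hand off to a fastop stub / em_*
	 * handler registered in the opcode tables, or fall through to
	 * the one-byte/two-byte opcode switches below.
	 */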
special_insn:

	if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:		/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8:		/* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9:		/* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc:		/* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd:		/* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;
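
	/*
	 * Writeback phase: commit the (possibly modified) source operand
	 * for SrcWrite instructions such as xadd/cmpxchg, then the
	 * destination unless NoWrite, and finally advance RSI/RDI/RCX
	 * for string instructions.
	 */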
writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}