x86.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"

#include <linux/clocksource.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
/* EFER defaults:
 * - enable SYSCALL by default because it is emulated by KVM
 * - enable LME and LMA by default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
/* settable bits: SCE (0), LME (8) and LMA (10); everything else is reserved */
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
/* on 32 bit hosts only SCE (bit 0) may be set */
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {	/* TI bit set: selector indexes the LDT */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	/* selector & ~7 strips RPL/TI, giving the descriptor's byte offset */
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = d->base0 | ((unsigned long)d->base1 << 16) |
		((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
	/* system descriptors (LDT, TSS) are 16 bytes wide in long mode */
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	/*
	 * The same field backs the APIC base whether the irqchip lives in
	 * the kernel or in userspace, so no need to distinguish on reads.
	 */
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;
	if (vcpu->arch.exception.pending) {
		if (vcpu->arch.exception.nr == PF_VECTOR) {
			printk(KERN_DEBUG "kvm: inject_page_fault:"
			       " double fault 0x%lx\n", addr);
			vcpu->arch.exception.nr = DF_VECTOR;
			vcpu->arch.exception.error_code = 0;
		} else if (vcpu->arch.exception.nr == DF_VECTOR) {
			/* triple fault -> shutdown */
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		}
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}
/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		/* a present pdpte must have no reserved bits set */
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:
	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
	return changed;
}
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
	KVMTRACE_1D(LMSW, vcpu,
		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
		    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS,
 * KVM_SET_MSRS and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_IA32_PERF_STATUS,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};
static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct pvclock_wall_clock wc;
	struct timespec now, sys, boot;

	if (!wall_clock)
		return;

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	now = current_kernel_time();
	ktime_get_ts(&sys);
	boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
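
/*
 * Illustrative sketch (an assumption about the guest side, not code from
 * this file): the odd/even version dance above is a seqlock-style
 * protocol, so a guest reading the wall clock would retry roughly as
 *
 *	do {
 *		version = wc->version;
 *		rmb();
 *		sec  = wc->sec;
 *		nsec = wc->nsec;
 *		rmb();
 *	} while ((version & 1) || (version != wc->version));
 *
 * ensuring it never consumes a half-written update.
 */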
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}
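
/*
 * For instance, div_frac(1, 3) computes (1 << 32) / 3 = 0x55555555,
 * i.e. the fraction 1/3 in 0.32 fixed point.
 */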
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs*2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __FUNCTION__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}
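
/*
 * Worked example (illustrative): with tsc_khz = 2000000 (a 2 GHz TSC),
 * tps64 = 2e9, so neither loop runs and shift stays 0, while
 * tsc_to_system_mul = div_frac(1e9, 2e9) = 2^31.  The guest then gets
 * ns = (tsc_delta * 2^31) >> 32 = tsc_delta / 2, i.e. 0.5 ns per
 * cycle, as expected for a 2 GHz clock.
 */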
static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;

	if (!vcpu->time_page)
		return;

	if (unlikely(vcpu->hv_clock_tsc_khz != tsc_khz)) {
		kvm_set_time_scale(tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = tsc_khz;
	}

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
		    &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */
	vcpu->hv_clock.system_time = ts.tv_nsec +
				     (NSEC_PER_SEC * (u64)ts.tv_sec);
	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}
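
/*
 * Consumer-side sketch (an assumption about the guest pvclock code,
 * paraphrased rather than quoted): the guest reconstructs nanoseconds
 * from the fields written above as
 *
 *	delta = rdtsc() - hv_clock->tsc_timestamp;
 *	if (hv_clock->tsc_shift >= 0)
 *		delta <<= hv_clock->tsc_shift;
 *	else
 *		delta >>= -hv_clock->tsc_shift;
 *	ns = hv_clock->system_time +
 *	     ((delta * hv_clock->tsc_to_system_mul) >> 32);
 *
 * which is why kvm_set_time_scale() only needs to recompute shift and
 * mul when tsc_khz changes.
 */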
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_MCG_CTL:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		down_read(&current->mm->mmap_sem);
		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
		up_read(&current->mm->mmap_sem);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_write_guest_time(vcpu);
		break;
	}
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
		/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);
/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	down_read(&vcpu->kvm->slots_lock);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	up_read(&vcpu->kvm->slots_lock);

	vcpu_put(vcpu);

	return i;
}
/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}
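
/*
 * Illustrative userspace sketch (assumes an open vcpu fd and the usual
 * <linux/kvm.h> definitions; not code from this file): reading one MSR
 * through this path could look like
 *
 *	struct {
 *		struct kvm_msrs header;
 *		struct kvm_msr_entry entry;
 *	} m = { .header.nmsrs = 1, .entry.index = MSR_EFER };
 *
 *	int n = ioctl(vcpu_fd, KVM_GET_MSRS, &m);
 *
 * Note the return value is the count of MSRs processed (here expected
 * to be 1), not a plain 0/-1 status, matching msr_io() returning 'n'.
 */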
/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
void decache_vcpus_on_cpu(int cpu)
{
}
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:
		r = !tdp_enabled;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
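
/*
 * Illustrative usage (an assumption about the caller, not code from this
 * file): userspace probes these capabilities on the /dev/kvm fd, e.g.
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP) > 0)
 *		... in-kernel irqchip is available ...
 */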
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
						      cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	kvm_write_guest_time(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}
static int is_efer_nx(void)
{
	u64 efer;

	rdmsrl(MSR_EFER, efer);
	return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	/* bit 20 of leaf 0x80000001 edx is the NX capability */
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}
/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}
static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}
static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
		bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
		bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
	const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
		bit(X86_FEATURE_SYSCALL) |
		/* advertise NX only when the host has it enabled; this must
		 * be ?: rather than &&, which would yield 0 or 1 instead of
		 * the NX feature bit */
		(is_efer_nx() ? bit(X86_FEATURE_NX) : 0) |
#ifdef CONFIG_X86_64
		bit(X86_FEATURE_LM) |
#endif
		bit(X86_FEATURE_MMXEXT) |
		bit(X86_FEATURE_3DNOWEXT) |
		bit(X86_FEATURE_3DNOW);
	const u32 kvm_supported_word3_x86_features =
		bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
	const u32 kvm_supported_word6_x86_features =
		bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY);

	/* all func 2 cpuid_count() should be called on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word3_x86_features;
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 and 0xb have additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			level_type = entry[i - 1].ecx & 0xff;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}
	put_cpu();
}
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->arch.irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);

	vcpu_put(vcpu);

	return 0;
}
static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		struct kvm_lapic_state lapic;

		memset(&lapic, 0, sizeof lapic);
		r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &lapic, sizeof lapic))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		struct kvm_lapic_state lapic;

		r = -EFAULT;
		if (copy_from_user(&lapic, argp, sizeof lapic))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}
static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	down_write(&kvm->slots_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	up_write(&kvm->slots_lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->arch.naliases; ++i) {
		alias = &kvm->arch.aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	down_write(&kvm->slots_lock);

	p = &kvm->arch.aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->arch.aliases[n - 1].npages)
			break;
	kvm->arch.naliases = n;

	kvm_mmu_zap_all(kvm);

	up_write(&kvm->slots_lock);

	return 0;

out:
	return r;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[0],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[1],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic,
		       ioapic_irqchip(kvm),
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&pic_irqchip(kvm)->pics[0],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&pic_irqchip(kvm)->pics[1],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
		       &chip->chip.ioapic,
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
	return r;
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
	kvm_pit_load_count(kvm, 0, ps->channels[0].count);
	return r;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	down_write(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	up_write(&kvm->slots_lock);
	return r;
}
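
/*
 * Sizing note (illustrative): the dirty bitmap carries one bit per page,
 * rounded up to whole longs.  For a 130-page slot on a 64-bit host,
 * ALIGN(130, 64) / 8 = 192 / 8 = 24 bytes are cleared, covering bit
 * positions 0..191 even though only 130 are in use.
 */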

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	case KVM_CREATE_IRQCHIP:
		r = -ENOMEM;
		kvm->arch.vpic = kvm_create_pic(kvm);
		if (kvm->arch.vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				kfree(kvm->arch.vpic);
				kvm->arch.vpic = NULL;
				goto out;
			}
		} else
			goto out;
		break;
	case KVM_CREATE_PIT:
		r = -ENOMEM;
		kvm->arch.vpit = kvm_create_pit(kvm);
		if (kvm->arch.vpit)
			r = 0;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			if (irq_event.irq < 16)
				kvm_pic_set_irq(pic_irqchip(kvm),
						irq_event.irq,
						irq_event.level);
			kvm_ioapic_set_irq(kvm->arch.vioapic,
					   irq_event.irq,
					   irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_PIT: {
		struct kvm_pit_state ps;

		r = -EFAULT;
		if (copy_from_user(&ps, argp, sizeof ps))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit(kvm, &ps);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &ps, sizeof ps))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT: {
		struct kvm_pit_state ps;

		r = -EFAULT;
		if (copy_from_user(&ps, argp, sizeof ps))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit(kvm, &ps);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}
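
/*
 * Illustrative sketch, not in the original source: once the in-kernel
 * irqchip exists, userspace asserts and deasserts an interrupt line
 * through the VM fd (vm_fd is a hypothetical name for the descriptor
 * returned by KVM_CREATE_VM):
 *
 *	struct kvm_irq_level irq = { .irq = 4, .level = 1 };
 *
 *	ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0);
 *	ioctl(vm_fd, KVM_IRQ_LINE, &irq);
 *	irq.level = 0;
 *	ioctl(vm_fd, KVM_IRQ_LINE, &irq);
 */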

static void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}
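
/*
 * Illustrative trace (MSR names invented): if msrs_to_save starts as
 * { A, B, C, D } and rdmsr_safe() faults on B, the loop compacts the
 * array in place to { A, C, D, D } and sets num_msrs_to_save to 3; the
 * stale trailing slot is never consulted again.
 */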

/*
 * Only the apic needs an MMIO device hook, so shortcut now.
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						   gpa_t addr)
{
	struct kvm_io_device *dev;

	if (vcpu->arch.apic) {
		dev = &vcpu->arch.apic->dev;
		if (dev->in_range(dev, addr))
			return dev;
	}
	return NULL;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
	return dev;
}

int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(emulator_read_std);
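
/*
 * Chunking example (illustrative): a 10-byte read that starts 6 bytes
 * before a page boundary is split into tocopy = 6 then tocopy = 4, each
 * chunk translated through gva_to_gpa() separately so a fault on either
 * page is reported precisely.
 */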

static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	}

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_read_std(addr, val, bytes, vcpu)
			== X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}

int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);
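
/*
 * Split arithmetic (illustrative, 4K pages): for addr = 0x1ffe and
 * bytes = 4, now = -addr & ~PAGE_MASK = 2, so the first call covers the
 * two bytes up to the boundary and the second covers the remaining two
 * bytes at 0x2000.
 */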

static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
#ifndef CONFIG_X86_64
	/* guests cmpxchg8b have to be emulated atomically */
	if (bytes == 8) {
		gpa_t gpa;
		struct page *page;
		char *kaddr;
		u64 val;

		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

		if (gpa == UNMAPPED_GVA ||
		    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
			goto emul_write;

		if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
			goto emul_write;

		val = *(u64 *)new;

		down_read(&current->mm->mmap_sem);
		page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
		up_read(&current->mm->mmap_sem);

		kaddr = kmap_atomic(page, KM_USER0);
		set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
		kunmap_atomic(kaddr, KM_USER0);
		kvm_release_page_dirty(page);
	}
emul_write:
#endif

	return emulator_write_emulated(addr, new, bytes, vcpu);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	KVMTRACE_0D(CLTS, vcpu, handler);
	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_x86_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = vcpu->arch.rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

static struct x86_emulate_ops emulate_ops = {
	.read_std            = emulator_read_std,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
};

int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code,
			int emulation_type)
{
	int r;
	struct decode_cache *c;

	vcpu->arch.mmio_fault_cr2 = cr2;
	kvm_x86_ops->cache_regs(vcpu);

	vcpu->mmio_is_write = 0;
	vcpu->arch.pio.string = 0;

	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
		int cs_db, cs_l;
		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

		vcpu->arch.emulate_ctxt.vcpu = vcpu;
		vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
		vcpu->arch.emulate_ctxt.mode =
			(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
			? X86EMUL_MODE_REAL : cs_l
			? X86EMUL_MODE_PROT64 : cs_db
			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

		if (vcpu->arch.emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
			vcpu->arch.emulate_ctxt.cs_base = 0;
			vcpu->arch.emulate_ctxt.ds_base = 0;
			vcpu->arch.emulate_ctxt.es_base = 0;
			vcpu->arch.emulate_ctxt.ss_base = 0;
		} else {
			vcpu->arch.emulate_ctxt.cs_base =
					get_segment_base(vcpu, VCPU_SREG_CS);
			vcpu->arch.emulate_ctxt.ds_base =
					get_segment_base(vcpu, VCPU_SREG_DS);
			vcpu->arch.emulate_ctxt.es_base =
					get_segment_base(vcpu, VCPU_SREG_ES);
			vcpu->arch.emulate_ctxt.ss_base =
					get_segment_base(vcpu, VCPU_SREG_SS);
		}

		vcpu->arch.emulate_ctxt.gs_base =
				get_segment_base(vcpu, VCPU_SREG_GS);
		vcpu->arch.emulate_ctxt.fs_base =
				get_segment_base(vcpu, VCPU_SREG_FS);

		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
		/*
		 * When emulating because of an invalid-opcode trap, reject
		 * everything other than the VMCALL/VMMCALL encodings.
		 */
		c = &vcpu->arch.emulate_ctxt.decode;
		if ((emulation_type & EMULTYPE_TRAP_UD) &&
		    (!(c->twobyte && c->b == 0x01 &&
		      (c->modrm_reg == 0 || c->modrm_reg == 3) &&
		       c->modrm_mod == 3 && c->modrm_rm == 1)))
			return EMULATE_FAIL;

		++vcpu->stat.insn_emulation;
		if (r) {
			++vcpu->stat.insn_emulation_fail;
			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
				return EMULATE_DONE;
			return EMULATE_FAIL;
		}
	}

	r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);

	if (vcpu->arch.pio.string)
		return EMULATE_DO_MMIO;

	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);
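
/*
 * Caller contract (sketch): EMULATE_DONE means the instruction was fully
 * handled in the kernel; EMULATE_DO_MMIO means the kvm_run area has been
 * filled in and userspace must complete the access; EMULATE_FAIL means
 * the failure was reported above and the guest was not advanced.
 */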

static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
		if (vcpu->arch.pio.guest_pages[i]) {
			kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
			vcpu->arch.pio.guest_pages[i] = NULL;
		}
}

static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->arch.pio_data;
	void *q;
	unsigned bytes;
	int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;

	q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
		 PAGE_KERNEL);
	if (!q) {
		free_pio_guest_pages(vcpu);
		return -ENOMEM;
	}
	q += vcpu->arch.pio.guest_page_offset;
	bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
	if (vcpu->arch.pio.in)
		memcpy(q, p, bytes);
	else
		memcpy(p, q, bytes);
	q -= vcpu->arch.pio.guest_page_offset;
	vunmap(q);
	free_pio_guest_pages(vcpu);
	return 0;
}

int complete_pio(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->arch.pio;
	long delta;
	int r;

	kvm_x86_ops->cache_regs(vcpu);

	if (!io->string) {
		if (io->in)
			memcpy(&vcpu->arch.regs[VCPU_REGS_RAX],
			       vcpu->arch.pio_data, io->size);
	} else {
		if (io->in) {
			r = pio_copy_data(vcpu);
			if (r) {
				kvm_x86_ops->cache_regs(vcpu);
				return r;
			}
		}

		delta = 1;
		if (io->rep) {
			delta *= io->cur_count;
			/*
			 * The size of the register should really depend on
			 * current address size.
			 */
			vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
		}
		if (io->down)
			delta = -delta;
		delta *= io->size;
		if (io->in)
			vcpu->arch.regs[VCPU_REGS_RDI] += delta;
		else
			vcpu->arch.regs[VCPU_REGS_RSI] += delta;
	}

	kvm_x86_ops->decache_regs(vcpu);

	io->count -= io->cur_count;
	io->cur_count = 0;

	return 0;
}

static void kernel_pio(struct kvm_io_device *pio_dev,
		       struct kvm_vcpu *vcpu,
		       void *pd)
{
	/* TODO: String I/O for in kernel device */

	mutex_lock(&vcpu->kvm->lock);
	if (vcpu->arch.pio.in)
		kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
				  vcpu->arch.pio.size,
				  pd);
	else
		kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
				   vcpu->arch.pio.size,
				   pd);
	mutex_unlock(&vcpu->kvm->lock);
}

static void pio_string_write(struct kvm_io_device *pio_dev,
			     struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->arch.pio;
	void *pd = vcpu->arch.pio_data;
	int i;

	mutex_lock(&vcpu->kvm->lock);
	for (i = 0; i < io->cur_count; i++) {
		kvm_iodevice_write(pio_dev, io->port,
				   io->size,
				   pd);
		pd += io->size;
	}
	mutex_unlock(&vcpu->kvm->lock);
}

static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
					       gpa_t addr)
{
	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
}

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port)
{
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->arch.pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
	vcpu->run->io.port = vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.string = 0;
	vcpu->arch.pio.down = 0;
	vcpu->arch.pio.guest_page_offset = 0;
	vcpu->arch.pio.rep = 0;

	if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
		KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
			    handler);
	else
		KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
			    handler);

	kvm_x86_ops->cache_regs(vcpu);
	memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
	kvm_x86_ops->decache_regs(vcpu);

	kvm_x86_ops->skip_emulated_instruction(vcpu);

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (pio_dev) {
		kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
		complete_pio(vcpu);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio);
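
/*
 * Example flow (illustrative): a guest "out %al, $0x70" with no matching
 * in-kernel device returns 0 here, so the caller exits to userspace with
 * exit_reason KVM_EXIT_IO, direction KVM_EXIT_IO_OUT, size 1, port 0x70,
 * and the data byte staged in the shared pio_data page.
 */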

int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int i, ret = 0;
	int nr_pages = 1;
	struct page *page;
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->arch.pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
	vcpu->run->io.port = vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.string = 1;
	vcpu->arch.pio.down = down;
	vcpu->arch.pio.guest_page_offset = offset_in_page(address);
	vcpu->arch.pio.rep = rep;

	if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
		KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
			    handler);
	else
		KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
			    handler);

	if (!count) {
		kvm_x86_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now) {
		/*
		 * String I/O straddles page boundary.  Pin two guest pages
		 * so that we satisfy atomicity constraints.  Do just one
		 * transaction to avoid complexity.
		 */
		nr_pages = 2;
		now = 1;
	}
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		pr_unimpl(vcpu, "guest string pio down\n");
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->arch.pio.cur_count = now;

	if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
		kvm_x86_ops->skip_emulated_instruction(vcpu);

	for (i = 0; i < nr_pages; ++i) {
		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
		vcpu->arch.pio.guest_pages[i] = page;
		if (!page) {
			kvm_inject_gp(vcpu, 0);
			free_pio_guest_pages(vcpu);
			return 1;
		}
	}

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (!vcpu->arch.pio.in) {
		/* string PIO write */
		ret = pio_copy_data(vcpu);
		if (ret >= 0 && pio_dev) {
			pio_string_write(pio_dev, vcpu);
			complete_pio(vcpu);
			if (vcpu->arch.pio.count == 0)
				ret = 1;
		}
	} else if (pio_dev)
		pr_unimpl(vcpu, "no string pio read support yet, "
			  "port %x size %d count %ld\n",
			  port, size, count);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);

int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		r = -EEXIST;
		goto out;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		r = -EOPNOTSUPP;
		goto out;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	r = kvm_mmu_module_init();
	if (r)
		goto out;

	kvm_init_msr_list();

	kvm_x86_ops = ops;
	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			      PT_DIRTY_MASK, PT64_NX_MASK, 0);
	return 0;

out:
	return r;
}

void kvm_arch_exit(void)
{
	kvm_x86_ops = NULL;
	kvm_mmu_module_exit();
}

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	KVMTRACE_0D(HLT, vcpu, handler);
	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		up_read(&vcpu->kvm->slots_lock);
		kvm_vcpu_block(vcpu);
		down_read(&vcpu->kvm->slots_lock);
		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
			   unsigned long a1)
{
	if (is_long_mode(vcpu))
		return a0;
	else
		return a0 | ((gpa_t)a1 << 32);
}
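
/*
 * Example (illustrative): a 32-bit guest passing a0 = 0x12340000 and
 * a1 = 0x1 describes the 64-bit gpa 0x112340000; a long-mode guest puts
 * the whole address in a0 and a1 is ignored.
 */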

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;
	int r = 1;

	kvm_x86_ops->cache_regs(vcpu);

	nr = vcpu->arch.regs[VCPU_REGS_RAX];
	a0 = vcpu->arch.regs[VCPU_REGS_RBX];
	a1 = vcpu->arch.regs[VCPU_REGS_RCX];
	a2 = vcpu->arch.regs[VCPU_REGS_RDX];
	a3 = vcpu->arch.regs[VCPU_REGS_RSI];

	KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);

	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	switch (nr) {
	case KVM_HC_VAPIC_POLL_IRQ:
		ret = 0;
		break;
	case KVM_HC_MMU_OP:
		r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
		break;
	default:
		ret = -KVM_ENOSYS;
		break;
	}
	vcpu->arch.regs[VCPU_REGS_RAX] = ret;
	kvm_x86_ops->decache_regs(vcpu);
	++vcpu->stat.hypercalls;
	return r;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
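
/*
 * Guest-side sketch (assumes a VMX host; SVM guests use "vmmcall"): the
 * ABI consumed above takes the hypercall number in rax, arguments in
 * rbx, rcx, rdx and rsi, and returns the status in rax:
 *
 *	unsigned long ret;
 *
 *	asm volatile("vmcall"
 *		     : "=a" (ret)
 *		     : "a" (KVM_HC_VAPIC_POLL_IRQ), "b" (0)
 *		     : "memory");
 */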

int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
	char instruction[3];
	int ret = 0;

	/*
	 * Blow out the MMU to ensure that no other VCPU has an active mapping
	 * to ensure that the updated hypercall appears atomically across all
	 * VCPUs.
	 */
	kvm_mmu_zap_all(vcpu->kvm);

	kvm_x86_ops->cache_regs(vcpu);
	kvm_x86_ops->patch_hypercall(vcpu, instruction);
	if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
	    != X86EMUL_CONTINUE)
		ret = -EFAULT;

	return ret;
}

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	kvm_lmsw(vcpu, msw);
	*rflags = kvm_x86_ops->get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	unsigned long value;

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		value = vcpu->arch.cr0;
		break;
	case 2:
		value = vcpu->arch.cr2;
		break;
	case 3:
		value = vcpu->arch.cr3;
		break;
	case 4:
		value = vcpu->arch.cr4;
		break;
	case 8:
		value = kvm_get_cr8(vcpu);
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
		return 0;
	}
	KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
		    (u32)((u64)value >> 32), handler);

	return value;
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
		    (u32)((u64)val >> 32), handler);

	switch (cr) {
	case 0:
		kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
		*rflags = kvm_x86_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->arch.cr2 = val;
		break;
	case 3:
		kvm_set_cr3(vcpu, val);
		break;
	case 4:
		kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
		break;
	case 8:
		kvm_set_cr8(vcpu, val & 0xfUL);
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
	}
}

static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	int j, nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	for (j = (i + 1) % nent; ; j = (j + 1) % nent) {
		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
		if (ej->function == e->function) {
			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
			return j;
		}
	}
	return 0; /* silence gcc, even though control never reaches here */
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
				   u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	int i;
	u32 function, index;
	struct kvm_cpuid_entry2 *e, *best;

	kvm_x86_ops->cache_regs(vcpu);
	function = vcpu->arch.regs[VCPU_REGS_RAX];
	index = vcpu->arch.regs[VCPU_REGS_RCX];
	vcpu->arch.regs[VCPU_REGS_RAX] = 0;
	vcpu->arch.regs[VCPU_REGS_RBX] = 0;
	vcpu->arch.regs[VCPU_REGS_RCX] = 0;
	vcpu->arch.regs[VCPU_REGS_RDX] = 0;
	best = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	if (best) {
		vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
		vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
		vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
		vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
	}
	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->skip_emulated_instruction(vcpu);
	KVMTRACE_5D(CPUID, vcpu, function,
		    (u32)vcpu->arch.regs[VCPU_REGS_RAX],
		    (u32)vcpu->arch.regs[VCPU_REGS_RBX],
		    (u32)vcpu->arch.regs[VCPU_REGS_RCX],
		    (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
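
/*
 * Fallback note (illustrative): for a query past the last defined leaf,
 * e.g. function = 0x80000005 when only 0x80000000..0x80000004 exist, the
 * "both basic or both extended" scan selects the highest entry in the
 * same half, which approximates real CPUID behaviour for unknown leaves.
 */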

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->arch.irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->arch.interrupt_window_open &&
		(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = kvm_get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
					(vcpu->arch.interrupt_window_open &&
					 vcpu->arch.irq_summary == 0);
}

static void vapic_enter(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct page *page;

	if (!apic || !apic->vapic_addr)
		return;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	vcpu->arch.apic->vapic_page = page;
}

static void vapic_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic || !apic->vapic_addr)
		return;

	kvm_release_page_dirty(apic->vapic_page);
	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
}

static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
		pr_debug("vcpu %d received sipi with vector # %x\n",
			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
		kvm_lapic_reset(vcpu);
		r = kvm_x86_ops->vcpu_reset(vcpu);
		if (r)
			return r;
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}

	down_read(&vcpu->kvm->slots_lock);
	vapic_enter(vcpu);

preempted:
	if (vcpu->guest_debug.enabled)
		kvm_x86_ops->guest_debug_pre(vcpu);

again:
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_mmu_unload(vcpu);

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	if (vcpu->requests) {
		if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
			__kvm_migrate_timers(vcpu);
		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			kvm_x86_ops->tlb_flush(vcpu);
		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
				       &vcpu->requests)) {
			kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
			r = 0;
			goto out;
		}
		if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
			kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
			r = 0;
			goto out;
		}
	}

	clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
	kvm_inject_pending_timer_irqs(vcpu);

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	kvm_load_guest_fpu(vcpu);

	local_irq_disable();

	if (vcpu->requests || need_resched()) {
		local_irq_enable();
		preempt_enable();
		r = 1;
		goto out;
	}

	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		++vcpu->stat.signal_exits;
		goto out;
	}

	vcpu->guest_mode = 1;
	/*
	 * Make sure that guest_mode assignment won't happen after
	 * testing the pending IRQ vector bitmap.
	 */
	smp_wmb();

	if (vcpu->arch.exception.pending)
		__queue_exception(vcpu);
	else if (irqchip_in_kernel(vcpu->kvm))
		kvm_x86_ops->inject_pending_irq(vcpu);
	else
		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);

	kvm_lapic_sync_to_vapic(vcpu);

	up_read(&vcpu->kvm->slots_lock);

	kvm_guest_enter();

	KVMTRACE_0D(VMENTRY, vcpu, entryexit);
	kvm_x86_ops->run(vcpu, kvm_run);

	vcpu->guest_mode = 0;
	local_irq_enable();

	++vcpu->stat.exits;

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	down_read(&vcpu->kvm->slots_lock);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		kvm_x86_ops->cache_regs(vcpu);
		profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
	}

	if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
		vcpu->arch.exception.pending = false;

	kvm_lapic_sync_from_vapic(vcpu);

	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
			goto out;
		}
		if (!need_resched())
			goto again;
	}

out:
	up_read(&vcpu->kvm->slots_lock);
	if (r > 0) {
		kvm_resched(vcpu);
		down_read(&vcpu->kvm->slots_lock);
		goto preempted;
	}

	post_kvm_run_save(vcpu, kvm_run);

	down_read(&vcpu->kvm->slots_lock);
	vapic_exit(vcpu);
	up_read(&vcpu->kvm->slots_lock);

	return r;
}
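
/*
 * Loop structure (sketch): "again" re-enters the guest without letting
 * the scheduler run, "preempted" additionally re-arms the guest debug
 * state, and any r <= 0 escape propagates to the KVM_RUN handler below,
 * which hands control back to userspace.
 */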

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		vcpu_put(vcpu);
		return -EAGAIN;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* re-sync apic's tpr */
	if (!irqchip_in_kernel(vcpu->kvm))
		kvm_set_cr8(vcpu, kvm_run->cr8);

	if (vcpu->arch.pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}
#if CONFIG_HAS_IOMEM
	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;

		down_read(&vcpu->kvm->slots_lock);
		r = emulate_instruction(vcpu, kvm_run,
					vcpu->arch.mmio_fault_cr2, 0,
					EMULTYPE_NO_DECODE);
		up_read(&vcpu->kvm->slots_lock);
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			r = 0;
			goto out;
		}
	}
#endif
	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
		kvm_x86_ops->cache_regs(vcpu);
		vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
		kvm_x86_ops->decache_regs(vcpu);
	}

	r = __vcpu_run(vcpu, kvm_run);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}
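
/*
 * Userspace pairing (sketch, error handling omitted): the canonical
 * KVM_RUN loop maps the shared kvm_run area and dispatches on the exit
 * reason filled in by the code above:
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size,
 *				   PROT_READ | PROT_WRITE, MAP_SHARED,
 *				   vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		if (run->exit_reason == KVM_EXIT_IO ||
 *		    run->exit_reason == KVM_EXIT_MMIO)
 *			service_exit(run);
 *	}
 *
 * mmap_size comes from KVM_GET_VCPU_MMAP_SIZE and service_exit() is a
 * hypothetical stand-in for device emulation in the monitor.
 */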

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_x86_ops->cache_regs(vcpu);

	regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
	regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
	regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
	regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
	regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
	regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
	regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
#ifdef CONFIG_X86_64
	regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
	regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
	regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
	regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
	regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
	regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
	regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
	regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
#endif

	regs->rip = vcpu->arch.rip;
	regs->rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
	vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
	vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
	vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
	vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
	vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
	vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
	vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef CONFIG_X86_64
	vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
	vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
	vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
	vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
	vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
	vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
	vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
	vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
#endif

	vcpu->arch.rip = regs->rip;
	kvm_x86_ops->set_rflags(vcpu, regs->rflags);

	kvm_x86_ops->decache_regs(vcpu);

	vcpu->arch.exception.pending = false;

	vcpu_put(vcpu);

	return 0;
}

static void get_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	kvm_x86_ops->get_segment(vcpu, var, seg);
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct descriptor_table dt;
	int pending_vec;

	vcpu_load(vcpu);

	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	sregs->cr0 = vcpu->arch.cr0;
	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = vcpu->arch.cr3;
	sregs->cr4 = vcpu->arch.cr4;
	sregs->cr8 = kvm_get_cr8(vcpu);
	sregs->efer = vcpu->arch.shadow_efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	if (irqchip_in_kernel(vcpu->kvm)) {
		memset(sregs->interrupt_bitmap, 0,
		       sizeof sregs->interrupt_bitmap);
		pending_vec = kvm_x86_ops->get_irq(vcpu);
		if (pending_vec >= 0)
			set_bit(pending_vec,
				(unsigned long *)sregs->interrupt_bitmap);
	} else
		memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
		       sizeof sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	vcpu_put(vcpu);
	return 0;
}

static void set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	kvm_x86_ops->set_segment(vcpu, var, seg);
}

static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
				   struct kvm_segment *kvm_desct)
{
	kvm_desct->base = seg_desc->base0;
	kvm_desct->base |= seg_desc->base1 << 16;
	kvm_desct->base |= seg_desc->base2 << 24;
	kvm_desct->limit = seg_desc->limit0;
	kvm_desct->limit |= seg_desc->limit << 16;
	kvm_desct->selector = selector;
	kvm_desct->type = seg_desc->type;
	kvm_desct->present = seg_desc->p;
	kvm_desct->dpl = seg_desc->dpl;
	kvm_desct->db = seg_desc->d;
	kvm_desct->s = seg_desc->s;
	kvm_desct->l = seg_desc->l;
	kvm_desct->g = seg_desc->g;
	kvm_desct->avl = seg_desc->avl;
	if (!selector)
		kvm_desct->unusable = 1;
	else
		kvm_desct->unusable = 0;
	kvm_desct->padding = 0;
}

static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
					  u16 selector,
					  struct descriptor_table *dtable)
{
	if (selector & 1 << 2) {
		struct kvm_segment kvm_seg;

		get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);

		if (kvm_seg.unusable)
			dtable->limit = 0;
		else
			dtable->limit = kvm_seg.limit;
		dtable->base = kvm_seg.base;
	}
	else
		kvm_x86_ops->get_gdt(vcpu, dtable);
}

/* allowed only for 8-byte segment descriptors */
static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
		return 1;
	}
	return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
}

/* allowed only for 8-byte segment descriptors */
static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7)
		return 1;
	return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
}

static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
			     struct desc_struct *seg_desc)
{
	u32 base_addr;

	base_addr = seg_desc->base0;
	base_addr |= (seg_desc->base1 << 16);
	base_addr |= (seg_desc->base2 << 24);

	return base_addr;
}
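
/*
 * Example (illustrative): a descriptor with base0 = 0x1000, base1 = 0x02
 * and base2 = 0x00 yields base_addr = 0x00021000.
 */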

static int load_tss_segment32(struct kvm_vcpu *vcpu,
			      struct desc_struct *seg_desc,
			      struct tss_segment_32 *tss)
{
	u32 base_addr;

	base_addr = get_tss_base_addr(vcpu, seg_desc);

	return kvm_read_guest(vcpu->kvm, base_addr, tss,
			      sizeof(struct tss_segment_32));
}

static int save_tss_segment32(struct kvm_vcpu *vcpu,
			      struct desc_struct *seg_desc,
			      struct tss_segment_32 *tss)
{
	u32 base_addr;

	base_addr = get_tss_base_addr(vcpu, seg_desc);

	return kvm_write_guest(vcpu->kvm, base_addr, tss,
			       sizeof(struct tss_segment_32));
}

static int load_tss_segment16(struct kvm_vcpu *vcpu,
			      struct desc_struct *seg_desc,
			      struct tss_segment_16 *tss)
{
	u32 base_addr;

	base_addr = get_tss_base_addr(vcpu, seg_desc);

	return kvm_read_guest(vcpu->kvm, base_addr, tss,
			      sizeof(struct tss_segment_16));
}

static int save_tss_segment16(struct kvm_vcpu *vcpu,
			      struct desc_struct *seg_desc,
			      struct tss_segment_16 *tss)
{
	u32 base_addr;

	base_addr = get_tss_base_addr(vcpu, seg_desc);

	return kvm_write_guest(vcpu->kvm, base_addr, tss,
			       sizeof(struct tss_segment_16));
}

static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment kvm_seg;

	get_segment(vcpu, &kvm_seg, seg);
	return kvm_seg.selector;
}

static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
						u16 selector,
						struct kvm_segment *kvm_seg)
{
	struct desc_struct seg_desc;

	if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
		return 1;
	seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
	return 0;
}

static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
				   int type_bits, int seg)
{
	struct kvm_segment kvm_seg;

	if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
		return 1;
	kvm_seg.type |= type_bits;

	if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
	    seg != VCPU_SREG_LDTR)
		if (!kvm_seg.s)
			kvm_seg.unusable = 1;

	set_segment(vcpu, &kvm_seg, seg);
	return 0;
}

static void save_state_to_tss32(struct kvm_vcpu *vcpu,
				struct tss_segment_32 *tss)
{
	tss->cr3 = vcpu->arch.cr3;
	tss->eip = vcpu->arch.rip;
	tss->eflags = kvm_x86_ops->get_rflags(vcpu);
	tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
	tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
	tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
	tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
	tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
	tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
	tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
	tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
	tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}
static int load_state_from_tss32(struct kvm_vcpu *vcpu,
                                 struct tss_segment_32 *tss)
{
        kvm_set_cr3(vcpu, tss->cr3);

        vcpu->arch.rip = tss->eip;
        kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);

        vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
        vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
        vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
        vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
        vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
        vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
        vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
        vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;

        if (load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
                return 1;

        if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
                return 1;

        if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
                return 1;

        if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
                return 1;

        if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
                return 1;

        if (load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
                return 1;

        if (load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
                return 1;

        return 0;
}

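/* Snapshot the vcpu's registers and segment selectors into a 16-bit TSS. */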
static void save_state_to_tss16(struct kvm_vcpu *vcpu,
                                struct tss_segment_16 *tss)
{
        tss->ip = vcpu->arch.rip;
        tss->flag = kvm_x86_ops->get_rflags(vcpu);
        tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
        tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
        tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
        tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
        tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
        tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
        tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
        tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
        tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
        tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
        tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
        tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
        tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
        tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

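/* Restore vcpu state from a 16-bit TSS image; returns 1 on a failed load. */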
static int load_state_from_tss16(struct kvm_vcpu *vcpu,
                                 struct tss_segment_16 *tss)
{
        vcpu->arch.rip = tss->ip;
        kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
        vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
        vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
        vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
        vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
        vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
        vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
        vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
        vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;

        if (load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
                return 1;

        if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
                return 1;

        if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
                return 1;

        if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
                return 1;

        if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
                return 1;

        return 0;
}

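/*
 * The memory half of a 16-bit task switch: write the outgoing state into
 * the current task's TSS, then read the incoming TSS and load its state.
 * Note the inverted convention: returns 1 on success, 0 on failure.
 */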
static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
                              struct desc_struct *cseg_desc,
                              struct desc_struct *nseg_desc)
{
        struct tss_segment_16 tss_segment_16;
        int ret = 0;

        if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
                goto out;

        save_state_to_tss16(vcpu, &tss_segment_16);
        save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);

        if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
                goto out;

        if (load_state_from_tss16(vcpu, &tss_segment_16))
                goto out;

        ret = 1;
out:
        return ret;
}

static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
                              struct desc_struct *cseg_desc,
                              struct desc_struct *nseg_desc)
{
        struct tss_segment_32 tss_segment_32;
        int ret = 0;

        if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
                goto out;

        save_state_to_tss32(vcpu, &tss_segment_32);
        save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);

        if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
                goto out;

        if (load_state_from_tss32(vcpu, &tss_segment_32))
                goto out;

        ret = 1;
out:
        return ret;
}

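/*
 * Emulate a hardware task switch.  Validates privilege and the new TSS
 * limit, manages the busy bit and EFLAGS.NT according to the switch
 * reason (IRET, JMP, CALL or interrupt gate), exchanges register state
 * through 16- or 32-bit TSS images depending on the descriptor type,
 * and finally loads the new task register with type busy TSS (11).
 */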
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
{
        struct kvm_segment tr_seg;
        struct desc_struct cseg_desc;
        struct desc_struct nseg_desc;
        int ret = 0;

        get_segment(vcpu, &tr_seg, VCPU_SREG_TR);

        if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
                goto out;

        if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
                goto out;

        if (reason != TASK_SWITCH_IRET) {
                int cpl;

                cpl = kvm_x86_ops->get_cpl(vcpu);
                if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
                        kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
                        return 1;
                }
        }

        if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
                kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
                return 1;
        }

        if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
                cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
                save_guest_segment_descriptor(vcpu, tr_seg.selector,
                                              &cseg_desc);
        }

        if (reason == TASK_SWITCH_IRET) {
                u32 eflags = kvm_x86_ops->get_rflags(vcpu);
                kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
        }

        kvm_x86_ops->skip_emulated_instruction(vcpu);
        kvm_x86_ops->cache_regs(vcpu);

        if (nseg_desc.type & 8)
                ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
                                         &nseg_desc);
        else
                ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
                                         &nseg_desc);

        if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
                u32 eflags = kvm_x86_ops->get_rflags(vcpu);
                kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
        }

        if (reason != TASK_SWITCH_IRET) {
                nseg_desc.type |= (1 << 1); /* set the busy (B) flag */
                save_guest_segment_descriptor(vcpu, tss_selector,
                                              &nseg_desc);
        }

        kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
        seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
        tr_seg.type = 11;
        set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
        kvm_x86_ops->decache_regs(vcpu);
        return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);

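/*
 * Load the guest's special registers from userspace.  A change to cr0,
 * cr3, cr4 or EFER forces an MMU reset, since those control the paging
 * mode the MMU must emulate.
 */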
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int mmu_reset_needed = 0;
        int i, pending_vec, max_bits;
        struct descriptor_table dt;

        vcpu_load(vcpu);

        dt.limit = sregs->idt.limit;
        dt.base = sregs->idt.base;
        kvm_x86_ops->set_idt(vcpu, &dt);
        dt.limit = sregs->gdt.limit;
        dt.base = sregs->gdt.base;
        kvm_x86_ops->set_gdt(vcpu, &dt);

        vcpu->arch.cr2 = sregs->cr2;
        mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
        vcpu->arch.cr3 = sregs->cr3;

        kvm_set_cr8(vcpu, sregs->cr8);

        mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
        kvm_set_apic_base(vcpu, sregs->apic_base);

        kvm_x86_ops->decache_cr4_guest_bits(vcpu);

        mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
        kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
        vcpu->arch.cr0 = sregs->cr0;

        mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu))
                load_pdptrs(vcpu, vcpu->arch.cr3);

        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);

        if (!irqchip_in_kernel(vcpu->kvm)) {
                memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
                       sizeof vcpu->arch.irq_pending);
                vcpu->arch.irq_summary = 0;
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
                        if (vcpu->arch.irq_pending[i])
                                __set_bit(i, &vcpu->arch.irq_summary);
        } else {
                max_bits = (sizeof sregs->interrupt_bitmap) << 3;
                pending_vec = find_first_bit(
                        (const unsigned long *)sregs->interrupt_bitmap,
                        max_bits);
                /* Only pending external irq is handled here */
                if (pending_vec < max_bits) {
                        kvm_x86_ops->set_irq(vcpu, pending_vec);
                        pr_debug("Set back pending irq %d\n",
                                 pending_vec);
                }
        }

        set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
        set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
        set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
        set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
        set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

        vcpu_put(vcpu);

        return 0;
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                    struct kvm_debug_guest *dbg)
{
        int r;

        vcpu_load(vcpu);
        r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
        vcpu_put(vcpu);

        return r;
}

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
        u16     cwd;
        u16     swd;
        u16     twd;
        u16     fop;
        u64     rip;
        u64     rdp;
        u32     mxcsr;
        u32     mxcsr_mask;
        u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
        u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
#else
        u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        unsigned long vaddr = tr->linear_address;
        gpa_t gpa;

        vcpu_load(vcpu);
        down_read(&vcpu->kvm->slots_lock);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
        up_read(&vcpu->kvm->slots_lock);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
        tr->writeable = 1;
        tr->usermode = 0;
        vcpu_put(vcpu);

        return 0;
}

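/* Copy the guest fxsave image out to the userspace kvm_fpu layout. */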
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

        vcpu_load(vcpu);

        memcpy(fpu->fpr, fxsave->st_space, 128);
        fpu->fcw = fxsave->cwd;
        fpu->fsw = fxsave->swd;
        fpu->ftwx = fxsave->twd;
        fpu->last_opcode = fxsave->fop;
        fpu->last_ip = fxsave->rip;
        fpu->last_dp = fxsave->rdp;
        memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

        vcpu_put(vcpu);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

        vcpu_load(vcpu);

        memcpy(fxsave->st_space, fpu->fpr, 128);
        fxsave->cwd = fpu->fcw;
        fxsave->swd = fpu->fsw;
        fxsave->twd = fpu->ftwx;
        fxsave->fop = fpu->last_opcode;
        fxsave->rip = fpu->last_ip;
        fxsave->rdp = fpu->last_dp;
        memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

        vcpu_put(vcpu);

        return 0;
}

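/* Reset the guest FPU image to its power-on state (MXCSR = 0x1f80). */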
void fx_init(struct kvm_vcpu *vcpu)
{
        unsigned after_mxcsr_mask;

        /*
         * Touch the fpu the first time in a non-atomic context: if this
         * is the first fpu instruction, the exception handler will fire
         * before the instruction returns and will have to allocate ram
         * with GFP_KERNEL.
         */
        if (!used_math())
                fx_save(&vcpu->arch.host_fx_image);

        /* Initialize guest FPU by resetting ours and saving into guest's */
        preempt_disable();
        fx_save(&vcpu->arch.host_fx_image);
        fx_finit();
        fx_save(&vcpu->arch.guest_fx_image);
        fx_restore(&vcpu->arch.host_fx_image);
        preempt_enable();

        vcpu->arch.cr0 |= X86_CR0_ET;
        after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
        vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
        memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
               0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
        if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
                return;

        vcpu->guest_fpu_loaded = 1;
        fx_save(&vcpu->arch.host_fx_image);
        fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
        if (!vcpu->guest_fpu_loaded)
                return;

        vcpu->guest_fpu_loaded = 0;
        fx_save(&vcpu->arch.guest_fx_image);
        fx_restore(&vcpu->arch.host_fx_image);
        ++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int r;

        /* We do fxsave: this must be aligned. */
        BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

        vcpu_load(vcpu);
        r = kvm_arch_vcpu_reset(vcpu);
        if (r == 0)
                r = kvm_mmu_setup(vcpu);
        vcpu_put(vcpu);
        if (r < 0)
                goto free_vcpu;

        return 0;
free_vcpu:
        kvm_x86_ops->vcpu_free(vcpu);
        return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
        kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
        return kvm_x86_ops->vcpu_reset(vcpu);
}

void kvm_arch_hardware_enable(void *garbage)
{
        kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
        kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
        return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
        kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
        kvm_x86_ops->check_processor_compatibility(rtn);
}

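/*
 * Architecture-dependent vcpu construction: allocate the pio scratch
 * page and the MMU, and create the in-kernel local APIC when the
 * irqchip is emulated in the kernel.  Error paths unwind in reverse
 * order of the allocations.
 */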
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct page *page;
        struct kvm *kvm;
        int r;

        BUG_ON(vcpu->kvm == NULL);
        kvm = vcpu->kvm;

        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
        if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        else
                vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->arch.pio_data = page_address(page);

        r = kvm_mmu_create(vcpu);
        if (r < 0)
                goto fail_free_pio_data;

        if (irqchip_in_kernel(kvm)) {
                r = kvm_create_lapic(vcpu);
                if (r < 0)
                        goto fail_mmu_destroy;
        }

        return 0;

fail_mmu_destroy:
        kvm_mmu_destroy(vcpu);
fail_free_pio_data:
        free_page((unsigned long)vcpu->arch.pio_data);
fail:
        return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_free_lapic(vcpu);
        down_read(&vcpu->kvm->slots_lock);
        kvm_mmu_destroy(vcpu);
        up_read(&vcpu->kvm->slots_lock);
        free_page((unsigned long)vcpu->arch.pio_data);
}

struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

        if (!kvm)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);

        return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;

        /*
         * Unpin any mmu pages first.
         */
        for (i = 0; i < KVM_MAX_VCPUS; ++i)
                if (kvm->vcpus[i])
                        kvm_unload_vcpu_mmu(kvm->vcpus[i]);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_pit(kvm);
        kfree(kvm->arch.vpic);
        kfree(kvm->arch.vioapic);
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        if (kvm->arch.apic_access_page)
                put_page(kvm->arch.apic_access_page);
        if (kvm->arch.ept_identity_pagetable)
                put_page(kvm->arch.ept_identity_pagetable);
        kfree(kvm);
}

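/*
 * Called when a memory slot is created or changed.  Recomputes the MMU
 * page budget, write-protects the slot's pages and flushes remote TLBs
 * so stale translations cannot survive the update.
 */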
int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
        int npages = mem->memory_size >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

        /*
         * To keep backward compatibility with older userspace, x86 needs
         * to handle the !user_alloc case.
         */
        if (!user_alloc) {
                if (npages && !old.rmap) {
                        down_write(&current->mm->mmap_sem);
                        memslot->userspace_addr = do_mmap(NULL, 0,
                                                     npages * PAGE_SIZE,
                                                     PROT_READ | PROT_WRITE,
                                                     MAP_SHARED | MAP_ANONYMOUS,
                                                     0);
                        up_write(&current->mm->mmap_sem);

                        if (IS_ERR((void *)memslot->userspace_addr))
                                return PTR_ERR((void *)memslot->userspace_addr);
                } else {
                        if (!old.user_alloc && old.rmap) {
                                int ret;

                                down_write(&current->mm->mmap_sem);
                                ret = do_munmap(current->mm, old.userspace_addr,
                                                old.npages * PAGE_SIZE);
                                up_write(&current->mm->mmap_sem);
                                if (ret < 0)
                                        printk(KERN_WARNING
                                               "kvm_vm_ioctl_set_memory_region: "
                                               "failed to munmap memory\n");
                        }
                }
        }

        if (!kvm->arch.n_requested_mmu_pages) {
                unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        }

        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        kvm_flush_remote_tlbs(kvm);

        return 0;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
               || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED;
}

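/*
 * Empty IPI handler: the interrupt itself kicks the target cpu out of
 * guest mode, so there is nothing left to do here.
 */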
static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
        printk(KERN_DEBUG "vcpu_kick_intr %p \n", vcpu);
#endif
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
        int ipi_pcpu = vcpu->cpu;
        int cpu = get_cpu();

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                ++vcpu->stat.halt_wakeup;
        }
        /*
         * We may be called synchronously with irqs disabled in guest mode,
         * so there is no need to call smp_call_function_single() in that
         * case.
         */
        if (vcpu->guest_mode && vcpu->cpu != cpu)
                smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
        put_cpu();
}