kvm_main.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static int kvm_debugfs_num_entries;
static const struct file_operations *stat_fops_per_vm[];

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
		unsigned long start, unsigned long end)
{
}

bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn));

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);
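
/*
 * Typical usage (a hedged sketch, not a quote of any specific caller): code
 * that needs the vCPU's architectural state resident on the current CPU
 * brackets the work with vcpu_load()/vcpu_put(), e.g.:
 *
 *	vcpu_load(vcpu);
 *	... access or modify state that the arch keeps loaded in hardware ...
 *	vcpu_put(vcpu);
 *
 * vcpu_load() also registers the preempt notifier, so while the state is
 * loaded it is saved and restored across preemption by kvm_preempt_ops.
 */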
/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

static void ack_flush(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
{
	if (unlikely(!cpus))
		cpus = cpu_online_mask;

	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_flush, NULL, wait);
	return true;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	me = get_cpu();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
			continue;

		if (cpus != NULL && cpu != -1 && cpu != me &&
		    kvm_request_needs_ipi(vcpu, req))
			__cpumask_set_cpu(cpu, cpus);
	}
	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();
	free_cpumask_var(cpus);
	return called;
}

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	/*
	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
	 * kvm_make_all_cpus_request.
	 */
	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode. Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
	 * barrier here.
	 */
	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	init_swait_queue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);

	vcpu->pre_pcpu = -1;
	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/*
	 * no need for rcu_read_lock as VCPU_RUN is the only place that
	 * will change the vcpu->pid pointer and on uninit all file
	 * descriptors are already gone.
	 */
	put_pid(rcu_dereference_protected(vcpu->pid, 1));
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* we've to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);

	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);

	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, start, end);
	if (young)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead. If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence. If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	young = kvm_age_hva(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.flags			= MMU_INVALIDATE_DOES_NOT_BLOCK,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm_memslots *kvm_alloc_memslots(void)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!slots)
		return NULL;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = slots->memslots[i].id = i;

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont)
{
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(kvm, free, dont);

	free->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot, NULL);

	kvfree(slots);
}

static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;

	if (!kvm->debugfs_dentry)
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}

static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
{
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	struct kvm_stats_debugfs_item *p;

	if (!debugfs_initialized())
		return 0;

	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
	kvm->debugfs_dentry = debugfs_create_dir(dir_name,
						 kvm_debugfs_dir);
	if (!kvm->debugfs_dentry)
		return -ENOMEM;

	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
					 sizeof(*kvm->debugfs_stat_data),
					 GFP_KERNEL);
	if (!kvm->debugfs_stat_data)
		return -ENOMEM;

	for (p = debugfs_entries; p->name; p++) {
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->offset = p->offset;
		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
		if (!debugfs_create_file(p->name, 0644,
					 kvm->debugfs_dentry,
					 stat_data,
					 stat_fops_per_vm[p->kind]))
			return -ENOMEM;
	}
	return 0;
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kvm->mmu_lock);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	refcount_set(&kvm->users_count, 1);
	INIT_LIST_HEAD(&kvm->devices);

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_disable;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	r = -ENOMEM;
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_memslots *slots = kvm_alloc_memslots();

		if (!slots)
			goto out_err_no_srcu;
		/*
		 * Generations must be different for each address space.
		 * Init kvm generation close to the maximum to easily test the
		 * code of handling generation number wrap-around.
		 */
		slots->generation = i * 2 - 150;
		rcu_assign_pointer(kvm->memslots[i], slots);
	}

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL));
		if (!kvm->buses[i])
			goto out_err;
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);

	preempt_notifier_inc();

	return kvm;

out_err:
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	hardware_disable_all();
out_err_no_disable:
	refcount_set(&kvm->users_count, 0);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus(kvm, i));
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

		if (bus)
			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;
	}
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed.
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

/*
 * Insert memslot and re-sort memslots based on their GFN,
 * so binary search could be used to lookup GFN.
 * Sorting algorithm takes advantage of having initially
 * sorted array and known changed memslot position.
 */
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *new)
{
	int id = new->id;
	int i = slots->id_to_index[id];
	struct kvm_memory_slot *mslots = slots->memslots;

	WARN_ON(mslots[i].id != id);
	if (!new->npages) {
		WARN_ON(!mslots[i].npages);
		if (mslots[i].npages)
			slots->used_slots--;
	} else {
		if (!mslots[i].npages)
			slots->used_slots++;
	}

	while (i < KVM_MEM_SLOTS_NUM - 1 &&
	       new->base_gfn <= mslots[i + 1].base_gfn) {
		if (!mslots[i + 1].npages)
			break;
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
		i++;
	}

	/*
	 * The ">=" is needed when creating a slot with base_gfn == 0,
	 * so that it moves before all those with base_gfn == npages == 0.
	 *
	 * On the other hand, if new->npages is zero, the above loop has
	 * already left i pointing to the beginning of the empty part of
	 * mslots, and the ">=" would move the hole backwards in this
	 * case---which is wrong.  So skip the loop when deleting a slot.
	 */
	if (new->npages) {
		while (i > 0 &&
		       new->base_gfn >= mslots[i - 1].base_gfn) {
			mslots[i] = mslots[i - 1];
			slots->id_to_index[mslots[i].id] = i;
			i--;
		}
	} else
		WARN_ON_ONCE(i != slots->used_slots);

	mslots[i] = *new;
	slots->id_to_index[mslots[i].id] = i;
}

static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		int as_id, struct kvm_memslots *slots)
{
	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);

	/*
	 * Set the low bit in the generation, which disables SPTE caching
	 * until the end of synchronize_srcu_expedited.
	 */
	WARN_ON(old_memslots->generation & 1);
	slots->generation = old_memslots->generation + 1;

	rcu_assign_pointer(kvm->memslots[as_id], slots);
	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time. This prevents
	 * vm exits that race with memslot updates from caching a memslot
	 * generation that will (potentially) be valid forever.
	 *
	 * Generations must be unique even across address spaces. We do not need
	 * a global counter for that, instead the generation space is evenly split
	 * across address spaces. For example, with two address spaces, address
	 * space 0 will use generations 0, 4, 8, ... while address space 1 will
	 * use generations 2, 6, 10, 14, ...
	 */
	slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;

	kvm_arch_memslots_updated(kvm, slots);

	return old_memslots;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	struct kvm_memory_slot *slot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots = NULL, *old_memslots;
	int as_id, id;
	enum kvm_mr_change change;

	r = check_memory_region_flags(mem);
	if (r)
		goto out;

	r = -EINVAL;
	as_id = mem->slot >> 16;
	id = (u16)mem->slot;

	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	/* We can read the guest memory with __xxx_user() later on. */
	if ((id < KVM_USER_MEM_SLOTS) &&
	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	     !access_ok(VERIFY_WRITE,
			(void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size)))
		goto out;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	new = old = *slot;

	new.id = id;
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	if (npages) {
		if (!old.npages)
			change = KVM_MR_CREATE;
		else { /* Modify an existing slot. */
			if ((mem->userspace_addr != old.userspace_addr) ||
			    (npages != old.npages) ||
			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
				goto out;

			if (base_gfn != old.base_gfn)
				change = KVM_MR_MOVE;
			else if (new.flags != old.flags)
				change = KVM_MR_FLAGS_ONLY;
			else { /* Nothing to change. */
				r = 0;
				goto out;
			}
		}
	} else {
		if (!old.npages)
			goto out;

		change = KVM_MR_DELETE;
		new.base_gfn = 0;
		new.flags = 0;
	}

	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		/* Check for overlaps */
		r = -EEXIST;
		kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
			if (slot->id == id)
				continue;
			if (!((base_gfn + npages <= slot->base_gfn) ||
			      (base_gfn >= slot->base_gfn + slot->npages)))
				goto out;
		}
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;
	if (change == KVM_MR_CREATE) {
		new.userspace_addr = mem->userspace_addr;

		if (kvm_arch_create_memslot(kvm, &new, npages))
			goto out_free;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)
			goto out_free;
	}

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!slots)
		goto out_free;
	memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));

	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
		slot = id_to_memslot(slots, id);
		slot->flags |= KVM_MEMSLOT_INVALID;

		old_memslots = install_new_memslots(kvm, as_id, slots);

		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 * - kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow_memslot(kvm, slot);

		/*
		 * We can re-use the old_memslots from above, the only difference
		 * from the currently installed memslots is the invalid flag.  This
		 * will get overwritten by update_memslots anyway.
		 */
		slots = old_memslots;
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
	if (r)
		goto out_slots;

	/* actual memory is freed via old in kvm_free_memslot below */
	if (change == KVM_MR_DELETE) {
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	}

	update_memslots(slots, &new);
	old_memslots = install_new_memslots(kvm, as_id, slots);

	kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);

	kvm_free_memslot(kvm, &old, &new);
	kvfree(old_memslots);

	return 0;

out_slots:
	kvfree(slots);
out_free:
	kvm_free_memslot(kvm, &new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
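
/*
 * For context: userspace reaches kvm_vm_ioctl_set_memory_region() below via
 * the KVM_SET_USER_MEMORY_REGION ioctl on a VM fd.  A minimal, hedged sketch
 * of the userspace side (error handling omitted; the slot number, guest
 * physical address, size and the mmap()ed backing "mem" are illustrative):
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size     = 0x200000,
 *		.userspace_addr  = (__u64)(unsigned long)mem,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 *
 * The address-space id, if any, travels in the upper 16 bits of ->slot,
 * which is why the code above splits mem->slot into as_id and id.
 */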
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region *mem)
{
	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	return kvm_set_memory_region(kvm, mem);
}

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, as_id, id;
	unsigned long n;
	unsigned long any = 0;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	if (!memslot->dirty_bitmap)
		return -ENOENT;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		return -EFAULT;

	if (any)
		*is_dirty = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
 *	are dirty write protect them for next write.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 * @is_dirty:	flag set if any page is dirty
 *
 * We need to keep it in mind that VCPU threads can write to the bitmap
 * concurrently. So, to avoid losing track of dirty pages we keep the
 * following order:
 *
 *    1. Take a snapshot of the bit and clear it if needed.
 *    2. Write protect the corresponding page.
 *    3. Copy the snapshot to the userspace.
 *    4. Upon return caller flushes TLB's if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page is reported dirty using
 * the snapshot taken before and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 *
 */
int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, as_id, id;
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);

	dirty_bitmap = memslot->dirty_bitmap;
	if (!dirty_bitmap)
		return -ENOENT;

	n = kvm_dirty_bitmap_bytes(memslot);

	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);
	*is_dirty = false;
	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;

		if (!dirty_bitmap[i])
			continue;

		*is_dirty = true;

		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

		if (mask) {
			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
	}

	spin_unlock(&kvm->mmu_lock);
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
#endif
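
/*
 * For context: userspace consumes the bitmaps produced above through the
 * KVM_GET_DIRTY_LOG ioctl on the VM fd.  A minimal, hedged sketch of the
 * userspace side (assuming 64-bit longs and a slot created with
 * KVM_MEM_LOG_DIRTY_PAGES; "pages" is the slot size in pages):
 *
 *	unsigned long *bitmap = calloc((pages + 63) / 64, sizeof(long));
 *	struct kvm_dirty_log log = {
 *		.slot         = 0,
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * Each set bit marks a dirty page at base_gfn + bit within the slot; on
 * architectures using kvm_get_dirty_log_protect() the reported pages are
 * also write-protected again so that the next guest write is caught.
 */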
  990. bool kvm_largepages_enabled(void)
  991. {
  992. return largepages_enabled;
  993. }
  994. void kvm_disable_largepages(void)
  995. {
  996. largepages_enabled = false;
  997. }
  998. EXPORT_SYMBOL_GPL(kvm_disable_largepages);
  999. struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
  1000. {
  1001. return __gfn_to_memslot(kvm_memslots(kvm), gfn);
  1002. }
  1003. EXPORT_SYMBOL_GPL(gfn_to_memslot);
  1004. struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
  1005. {
  1006. return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
  1007. }
  1008. bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
  1009. {
  1010. struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
  1011. if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
  1012. memslot->flags & KVM_MEMSLOT_INVALID)
  1013. return false;
  1014. return true;
  1015. }
  1016. EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
  1017. unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
  1018. {
  1019. struct vm_area_struct *vma;
  1020. unsigned long addr, size;
  1021. size = PAGE_SIZE;
  1022. addr = gfn_to_hva(kvm, gfn);
  1023. if (kvm_is_error_hva(addr))
  1024. return PAGE_SIZE;
  1025. down_read(&current->mm->mmap_sem);
  1026. vma = find_vma(current->mm, addr);
  1027. if (!vma)
  1028. goto out;
  1029. size = vma_kernel_pagesize(vma);
  1030. out:
  1031. up_read(&current->mm->mmap_sem);
  1032. return size;
  1033. }
  1034. static bool memslot_is_readonly(struct kvm_memory_slot *slot)
  1035. {
  1036. return slot->flags & KVM_MEM_READONLY;
  1037. }
  1038. static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
  1039. gfn_t *nr_pages, bool write)
  1040. {
  1041. if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
  1042. return KVM_HVA_ERR_BAD;
  1043. if (memslot_is_readonly(slot) && write)
  1044. return KVM_HVA_ERR_RO_BAD;
  1045. if (nr_pages)
  1046. *nr_pages = slot->npages - (gfn - slot->base_gfn);
  1047. return __gfn_to_hva_memslot(slot, gfn);
  1048. }
  1049. static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
  1050. gfn_t *nr_pages)
  1051. {
  1052. return __gfn_to_hva_many(slot, gfn, nr_pages, true);
  1053. }
  1054. unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
  1055. gfn_t gfn)
  1056. {
  1057. return gfn_to_hva_many(slot, gfn, NULL);
  1058. }
  1059. EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
  1060. unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
  1061. {
  1062. return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
  1063. }
  1064. EXPORT_SYMBOL_GPL(gfn_to_hva);
  1065. unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
  1066. {
  1067. return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
  1068. }
  1069. EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
  1070. /*
  1071. * If writable is set to false, the hva returned by this function is only
  1072. * allowed to be read.
  1073. */
  1074. unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
  1075. gfn_t gfn, bool *writable)
  1076. {
  1077. unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
  1078. if (!kvm_is_error_hva(hva) && writable)
  1079. *writable = !memslot_is_readonly(slot);
  1080. return hva;
  1081. }
  1082. unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
  1083. {
  1084. struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
  1085. return gfn_to_hva_memslot_prot(slot, gfn, writable);
  1086. }
  1087. unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
  1088. {
  1089. struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
  1090. return gfn_to_hva_memslot_prot(slot, gfn, writable);
  1091. }
  1092. static inline int check_user_page_hwpoison(unsigned long addr)
  1093. {
  1094. int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
  1095. rc = get_user_pages(addr, 1, flags, NULL, NULL);
  1096. return rc == -EHWPOISON;
  1097. }
  1098. /*
  1099. * The atomic path to get the writable pfn which will be stored in @pfn,
  1100. * true indicates success, otherwise false is returned.
  1101. */
  1102. static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
  1103. bool write_fault, bool *writable, kvm_pfn_t *pfn)
  1104. {
  1105. struct page *page[1];
  1106. int npages;
  1107. if (!(async || atomic))
  1108. return false;
  1109. /*
  1110. * Fast pin a writable pfn only if it is a write fault request
  1111. * or the caller allows to map a writable pfn for a read fault
  1112. * request.
  1113. */
  1114. if (!(write_fault || writable))
  1115. return false;
  1116. npages = __get_user_pages_fast(addr, 1, 1, page);
  1117. if (npages == 1) {
  1118. *pfn = page_to_pfn(page[0]);
  1119. if (writable)
  1120. *writable = true;
  1121. return true;
  1122. }
  1123. return false;
  1124. }
  1125. /*
  1126. * The slow path to get the pfn of the specified host virtual address,
  1127. * 1 indicates success, -errno is returned if error is detected.
  1128. */
  1129. static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
  1130. bool *writable, kvm_pfn_t *pfn)
  1131. {
  1132. unsigned int flags = FOLL_HWPOISON;
  1133. struct page *page;
  1134. int npages = 0;
  1135. might_sleep();
  1136. if (writable)
  1137. *writable = write_fault;
  1138. if (write_fault)
  1139. flags |= FOLL_WRITE;
  1140. if (async)
  1141. flags |= FOLL_NOWAIT;
  1142. npages = get_user_pages_unlocked(addr, 1, &page, flags);
  1143. if (npages != 1)
  1144. return npages;
  1145. /* map read fault as writable if possible */
  1146. if (unlikely(!write_fault) && writable) {
  1147. struct page *wpage;
  1148. if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
  1149. *writable = true;
  1150. put_page(page);
  1151. page = wpage;
  1152. }
  1153. }
  1154. *pfn = page_to_pfn(page);
  1155. return npages;
  1156. }
  1157. static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
  1158. {
  1159. if (unlikely(!(vma->vm_flags & VM_READ)))
  1160. return false;
  1161. if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
  1162. return false;
  1163. return true;
  1164. }
  1165. static int hva_to_pfn_remapped(struct vm_area_struct *vma,
  1166. unsigned long addr, bool *async,
  1167. bool write_fault, bool *writable,
  1168. kvm_pfn_t *p_pfn)
  1169. {
  1170. unsigned long pfn;
  1171. int r;
  1172. r = follow_pfn(vma, addr, &pfn);
  1173. if (r) {
  1174. /*
  1175. * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
  1176. * not call the fault handler, so do it here.
  1177. */
  1178. bool unlocked = false;
  1179. r = fixup_user_fault(current, current->mm, addr,
  1180. (write_fault ? FAULT_FLAG_WRITE : 0),
  1181. &unlocked);
  1182. if (unlocked)
  1183. return -EAGAIN;
  1184. if (r)
  1185. return r;
  1186. r = follow_pfn(vma, addr, &pfn);
  1187. if (r)
  1188. return r;
  1189. }
  1190. if (writable)
  1191. *writable = true;
  1192. /*
  1193. * Get a reference here because callers of *hva_to_pfn* and
  1194. * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
  1195. * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
  1196. * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
  1197. * simply do nothing for reserved pfns.
  1198. *
  1199. * Whoever called remap_pfn_range is also going to call e.g.
  1200. * unmap_mapping_range before the underlying pages are freed,
  1201. * causing a call to our MMU notifier.
  1202. */
  1203. kvm_get_pfn(pfn);
  1204. *p_pfn = pfn;
  1205. return 0;
  1206. }
1207. /*
1208. * Pin the guest page at @addr in memory and return its pfn.
1209. * @addr: host virtual address which maps memory to the guest
1210. * @atomic: if true, the function must not sleep (only the fast path is tried)
1211. * @async: if non-NULL, do not wait for IO when the host page is not present;
1212. * *@async is set to true when the fault should be completed asynchronously
1213. * @write_fault: whether a writable host page is required
1214. * @writable: whether a writable host page may be mapped even for !@write_fault
1215. *
1216. * The function will map a writable host page in these two cases:
1217. * 1): @write_fault = true
1218. * 2): @write_fault = false && @writable != NULL; in this case @writable will
1219. * tell the caller whether the mapping is actually writable.
1220. */
  1221. static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
  1222. bool write_fault, bool *writable)
  1223. {
  1224. struct vm_area_struct *vma;
  1225. kvm_pfn_t pfn = 0;
  1226. int npages, r;
  1227. /* we can do it either atomically or asynchronously, not both */
  1228. BUG_ON(atomic && async);
  1229. if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
  1230. return pfn;
  1231. if (atomic)
  1232. return KVM_PFN_ERR_FAULT;
  1233. npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
  1234. if (npages == 1)
  1235. return pfn;
  1236. down_read(&current->mm->mmap_sem);
  1237. if (npages == -EHWPOISON ||
  1238. (!async && check_user_page_hwpoison(addr))) {
  1239. pfn = KVM_PFN_ERR_HWPOISON;
  1240. goto exit;
  1241. }
  1242. retry:
  1243. vma = find_vma_intersection(current->mm, addr, addr + 1);
  1244. if (vma == NULL)
  1245. pfn = KVM_PFN_ERR_FAULT;
  1246. else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
  1247. r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
  1248. if (r == -EAGAIN)
  1249. goto retry;
  1250. if (r < 0)
  1251. pfn = KVM_PFN_ERR_FAULT;
  1252. } else {
  1253. if (async && vma_is_valid(vma, write_fault))
  1254. *async = true;
  1255. pfn = KVM_PFN_ERR_FAULT;
  1256. }
  1257. exit:
  1258. up_read(&current->mm->mmap_sem);
  1259. return pfn;
  1260. }
  1261. kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
  1262. bool atomic, bool *async, bool write_fault,
  1263. bool *writable)
  1264. {
  1265. unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
  1266. if (addr == KVM_HVA_ERR_RO_BAD) {
  1267. if (writable)
  1268. *writable = false;
  1269. return KVM_PFN_ERR_RO_FAULT;
  1270. }
  1271. if (kvm_is_error_hva(addr)) {
  1272. if (writable)
  1273. *writable = false;
  1274. return KVM_PFN_NOSLOT;
  1275. }
1276. /* Do not map a writable pfn in a read-only memslot. */
  1277. if (writable && memslot_is_readonly(slot)) {
  1278. *writable = false;
  1279. writable = NULL;
  1280. }
  1281. return hva_to_pfn(addr, atomic, async, write_fault,
  1282. writable);
  1283. }
  1284. EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
  1285. kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
  1286. bool *writable)
  1287. {
  1288. return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
  1289. write_fault, writable);
  1290. }
  1291. EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
  1292. kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
  1293. {
  1294. return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
  1295. }
  1296. EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
  1297. kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
  1298. {
  1299. return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
  1300. }
  1301. EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
  1302. kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
  1303. {
  1304. return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
  1305. }
  1306. EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
  1307. kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
  1308. {
  1309. return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
  1310. }
  1311. EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
  1312. kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
  1313. {
  1314. return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
  1315. }
  1316. EXPORT_SYMBOL_GPL(gfn_to_pfn);
  1317. kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
  1318. {
  1319. return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
  1320. }
  1321. EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
  1322. int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
  1323. struct page **pages, int nr_pages)
  1324. {
  1325. unsigned long addr;
  1326. gfn_t entry = 0;
  1327. addr = gfn_to_hva_many(slot, gfn, &entry);
  1328. if (kvm_is_error_hva(addr))
  1329. return -1;
  1330. if (entry < nr_pages)
  1331. return 0;
  1332. return __get_user_pages_fast(addr, nr_pages, 1, pages);
  1333. }
  1334. EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
  1335. static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
  1336. {
  1337. if (is_error_noslot_pfn(pfn))
  1338. return KVM_ERR_PTR_BAD_PAGE;
  1339. if (kvm_is_reserved_pfn(pfn)) {
  1340. WARN_ON(1);
  1341. return KVM_ERR_PTR_BAD_PAGE;
  1342. }
  1343. return pfn_to_page(pfn);
  1344. }
  1345. struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
  1346. {
  1347. kvm_pfn_t pfn;
  1348. pfn = gfn_to_pfn(kvm, gfn);
  1349. return kvm_pfn_to_page(pfn);
  1350. }
  1351. EXPORT_SYMBOL_GPL(gfn_to_page);
  1352. struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
  1353. {
  1354. kvm_pfn_t pfn;
  1355. pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
  1356. return kvm_pfn_to_page(pfn);
  1357. }
  1358. EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
  1359. void kvm_release_page_clean(struct page *page)
  1360. {
  1361. WARN_ON(is_error_page(page));
  1362. kvm_release_pfn_clean(page_to_pfn(page));
  1363. }
  1364. EXPORT_SYMBOL_GPL(kvm_release_page_clean);
  1365. void kvm_release_pfn_clean(kvm_pfn_t pfn)
  1366. {
  1367. if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
  1368. put_page(pfn_to_page(pfn));
  1369. }
  1370. EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
  1371. void kvm_release_page_dirty(struct page *page)
  1372. {
  1373. WARN_ON(is_error_page(page));
  1374. kvm_release_pfn_dirty(page_to_pfn(page));
  1375. }
  1376. EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
  1377. void kvm_release_pfn_dirty(kvm_pfn_t pfn)
  1378. {
  1379. kvm_set_pfn_dirty(pfn);
  1380. kvm_release_pfn_clean(pfn);
  1381. }
  1382. EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
  1383. void kvm_set_pfn_dirty(kvm_pfn_t pfn)
  1384. {
  1385. if (!kvm_is_reserved_pfn(pfn)) {
  1386. struct page *page = pfn_to_page(pfn);
  1387. if (!PageReserved(page))
  1388. SetPageDirty(page);
  1389. }
  1390. }
  1391. EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
  1392. void kvm_set_pfn_accessed(kvm_pfn_t pfn)
  1393. {
  1394. if (!kvm_is_reserved_pfn(pfn))
  1395. mark_page_accessed(pfn_to_page(pfn));
  1396. }
  1397. EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
  1398. void kvm_get_pfn(kvm_pfn_t pfn)
  1399. {
  1400. if (!kvm_is_reserved_pfn(pfn))
  1401. get_page(pfn_to_page(pfn));
  1402. }
  1403. EXPORT_SYMBOL_GPL(kvm_get_pfn);
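/*
 * A minimal usage sketch (hypothetical helper, not part of this file):
 * a caller that pins a guest frame with gfn_to_pfn() is expected to drop
 * the reference with kvm_release_pfn_clean() (or kvm_release_pfn_dirty()
 * if it wrote to the page) once it is done with the pfn.
 */
static void __maybe_unused example_touch_gfn(struct kvm *kvm, gfn_t gfn)
{
	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_noslot_pfn(pfn))
		return;
	kvm_set_pfn_accessed(pfn);	/* hint that the page was used */
	kvm_release_pfn_clean(pfn);	/* drop the reference taken by gfn_to_pfn() */
}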
  1404. static int next_segment(unsigned long len, int offset)
  1405. {
  1406. if (len > PAGE_SIZE - offset)
  1407. return PAGE_SIZE - offset;
  1408. else
  1409. return len;
  1410. }
  1411. static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
  1412. void *data, int offset, int len)
  1413. {
  1414. int r;
  1415. unsigned long addr;
  1416. addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
  1417. if (kvm_is_error_hva(addr))
  1418. return -EFAULT;
  1419. r = __copy_from_user(data, (void __user *)addr + offset, len);
  1420. if (r)
  1421. return -EFAULT;
  1422. return 0;
  1423. }
  1424. int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
  1425. int len)
  1426. {
  1427. struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
  1428. return __kvm_read_guest_page(slot, gfn, data, offset, len);
  1429. }
  1430. EXPORT_SYMBOL_GPL(kvm_read_guest_page);
  1431. int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
  1432. int offset, int len)
  1433. {
  1434. struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
  1435. return __kvm_read_guest_page(slot, gfn, data, offset, len);
  1436. }
  1437. EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
  1438. int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
  1439. {
  1440. gfn_t gfn = gpa >> PAGE_SHIFT;
  1441. int seg;
  1442. int offset = offset_in_page(gpa);
  1443. int ret;
  1444. while ((seg = next_segment(len, offset)) != 0) {
  1445. ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
  1446. if (ret < 0)
  1447. return ret;
  1448. offset = 0;
  1449. len -= seg;
  1450. data += seg;
  1451. ++gfn;
  1452. }
  1453. return 0;
  1454. }
  1455. EXPORT_SYMBOL_GPL(kvm_read_guest);
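/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * kvm_read_guest() copies an arbitrary, possibly page-crossing range of
 * guest physical memory into a kernel buffer and returns 0 or -EFAULT.
 */
static int __maybe_unused example_read_guest_u64(struct kvm *kvm, gpa_t gpa,
						 u64 *val)
{
	/* sizeof(*val) may cross a page boundary; kvm_read_guest() handles it */
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}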
  1456. int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
  1457. {
  1458. gfn_t gfn = gpa >> PAGE_SHIFT;
  1459. int seg;
  1460. int offset = offset_in_page(gpa);
  1461. int ret;
  1462. while ((seg = next_segment(len, offset)) != 0) {
  1463. ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
  1464. if (ret < 0)
  1465. return ret;
  1466. offset = 0;
  1467. len -= seg;
  1468. data += seg;
  1469. ++gfn;
  1470. }
  1471. return 0;
  1472. }
  1473. EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
  1474. static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
  1475. void *data, int offset, unsigned long len)
  1476. {
  1477. int r;
  1478. unsigned long addr;
  1479. addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
  1480. if (kvm_is_error_hva(addr))
  1481. return -EFAULT;
  1482. pagefault_disable();
  1483. r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
  1484. pagefault_enable();
  1485. if (r)
  1486. return -EFAULT;
  1487. return 0;
  1488. }
  1489. int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
  1490. unsigned long len)
  1491. {
  1492. gfn_t gfn = gpa >> PAGE_SHIFT;
  1493. struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
  1494. int offset = offset_in_page(gpa);
  1495. return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
  1496. }
  1497. EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);
  1498. int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
  1499. void *data, unsigned long len)
  1500. {
  1501. gfn_t gfn = gpa >> PAGE_SHIFT;
  1502. struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
  1503. int offset = offset_in_page(gpa);
  1504. return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
  1505. }
  1506. EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
  1507. static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
  1508. const void *data, int offset, int len)
  1509. {
  1510. int r;
  1511. unsigned long addr;
  1512. addr = gfn_to_hva_memslot(memslot, gfn);
  1513. if (kvm_is_error_hva(addr))
  1514. return -EFAULT;
  1515. r = __copy_to_user((void __user *)addr + offset, data, len);
  1516. if (r)
  1517. return -EFAULT;
  1518. mark_page_dirty_in_slot(memslot, gfn);
  1519. return 0;
  1520. }
  1521. int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
  1522. const void *data, int offset, int len)
  1523. {
  1524. struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
  1525. return __kvm_write_guest_page(slot, gfn, data, offset, len);
  1526. }
  1527. EXPORT_SYMBOL_GPL(kvm_write_guest_page);
  1528. int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
  1529. const void *data, int offset, int len)
  1530. {
  1531. struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
  1532. return __kvm_write_guest_page(slot, gfn, data, offset, len);
  1533. }
  1534. EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
  1535. int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
  1536. unsigned long len)
  1537. {
  1538. gfn_t gfn = gpa >> PAGE_SHIFT;
  1539. int seg;
  1540. int offset = offset_in_page(gpa);
  1541. int ret;
  1542. while ((seg = next_segment(len, offset)) != 0) {
  1543. ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
  1544. if (ret < 0)
  1545. return ret;
  1546. offset = 0;
  1547. len -= seg;
  1548. data += seg;
  1549. ++gfn;
  1550. }
  1551. return 0;
  1552. }
  1553. EXPORT_SYMBOL_GPL(kvm_write_guest);
  1554. int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
  1555. unsigned long len)
  1556. {
  1557. gfn_t gfn = gpa >> PAGE_SHIFT;
  1558. int seg;
  1559. int offset = offset_in_page(gpa);
  1560. int ret;
  1561. while ((seg = next_segment(len, offset)) != 0) {
  1562. ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
  1563. if (ret < 0)
  1564. return ret;
  1565. offset = 0;
  1566. len -= seg;
  1567. data += seg;
  1568. ++gfn;
  1569. }
  1570. return 0;
  1571. }
  1572. EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
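/*
 * Resolve @gpa/@len once and remember the translation in @ghc together with
 * the memslot generation, so that later cached reads/writes can skip the
 * gfn-to-hva lookup.  A region that spans more than one memslot is still
 * validated here, but the cached accessors fall back to the slow path for it.
 */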
  1573. static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
  1574. struct gfn_to_hva_cache *ghc,
  1575. gpa_t gpa, unsigned long len)
  1576. {
  1577. int offset = offset_in_page(gpa);
  1578. gfn_t start_gfn = gpa >> PAGE_SHIFT;
  1579. gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
  1580. gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
  1581. gfn_t nr_pages_avail;
  1582. ghc->gpa = gpa;
  1583. ghc->generation = slots->generation;
  1584. ghc->len = len;
  1585. ghc->memslot = __gfn_to_memslot(slots, start_gfn);
  1586. ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
  1587. if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
  1588. ghc->hva += offset;
  1589. } else {
  1590. /*
  1591. * If the requested region crosses two memslots, we still
  1592. * verify that the entire region is valid here.
  1593. */
  1594. while (start_gfn <= end_gfn) {
  1595. nr_pages_avail = 0;
  1596. ghc->memslot = __gfn_to_memslot(slots, start_gfn);
  1597. ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
  1598. &nr_pages_avail);
  1599. if (kvm_is_error_hva(ghc->hva))
  1600. return -EFAULT;
  1601. start_gfn += nr_pages_avail;
  1602. }
1603. /* Use the slow path for cross-page reads and writes. */
  1604. ghc->memslot = NULL;
  1605. }
  1606. return 0;
  1607. }
  1608. int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1609. gpa_t gpa, unsigned long len)
  1610. {
  1611. struct kvm_memslots *slots = kvm_memslots(kvm);
  1612. return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
  1613. }
  1614. EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
  1615. int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1616. void *data, int offset, unsigned long len)
  1617. {
  1618. struct kvm_memslots *slots = kvm_memslots(kvm);
  1619. int r;
  1620. gpa_t gpa = ghc->gpa + offset;
  1621. BUG_ON(len + offset > ghc->len);
  1622. if (slots->generation != ghc->generation)
  1623. __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
  1624. if (unlikely(!ghc->memslot))
  1625. return kvm_write_guest(kvm, gpa, data, len);
  1626. if (kvm_is_error_hva(ghc->hva))
  1627. return -EFAULT;
  1628. r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
  1629. if (r)
  1630. return -EFAULT;
  1631. mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
  1632. return 0;
  1633. }
  1634. EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
  1635. int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1636. void *data, unsigned long len)
  1637. {
  1638. return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
  1639. }
  1640. EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
  1641. int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1642. void *data, unsigned long len)
  1643. {
  1644. struct kvm_memslots *slots = kvm_memslots(kvm);
  1645. int r;
  1646. BUG_ON(len > ghc->len);
  1647. if (slots->generation != ghc->generation)
  1648. __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
  1649. if (unlikely(!ghc->memslot))
  1650. return kvm_read_guest(kvm, ghc->gpa, data, len);
  1651. if (kvm_is_error_hva(ghc->hva))
  1652. return -EFAULT;
  1653. r = __copy_from_user(data, (void __user *)ghc->hva, len);
  1654. if (r)
  1655. return -EFAULT;
  1656. return 0;
  1657. }
  1658. EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
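/*
 * Illustrative sketch (hypothetical helper, not part of this file): a caller
 * that repeatedly accesses the same guest region initializes a
 * gfn_to_hva_cache once and then uses the *_cached accessors, which avoid
 * the memslot lookup as long as the memslot generation has not changed.
 */
static int __maybe_unused example_cached_write_u32(struct kvm *kvm,
						   struct gfn_to_hva_cache *ghc,
						   gpa_t gpa, u32 val)
{
	int r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(val));

	if (r)
		return r;
	return kvm_write_guest_cached(kvm, ghc, &val, sizeof(val));
}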
  1659. int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
  1660. {
  1661. const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
  1662. return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
  1663. }
  1664. EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
  1665. int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
  1666. {
  1667. gfn_t gfn = gpa >> PAGE_SHIFT;
  1668. int seg;
  1669. int offset = offset_in_page(gpa);
  1670. int ret;
  1671. while ((seg = next_segment(len, offset)) != 0) {
  1672. ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
  1673. if (ret < 0)
  1674. return ret;
  1675. offset = 0;
  1676. len -= seg;
  1677. ++gfn;
  1678. }
  1679. return 0;
  1680. }
  1681. EXPORT_SYMBOL_GPL(kvm_clear_guest);
  1682. static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
  1683. gfn_t gfn)
  1684. {
  1685. if (memslot && memslot->dirty_bitmap) {
  1686. unsigned long rel_gfn = gfn - memslot->base_gfn;
  1687. set_bit_le(rel_gfn, memslot->dirty_bitmap);
  1688. }
  1689. }
  1690. void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
  1691. {
  1692. struct kvm_memory_slot *memslot;
  1693. memslot = gfn_to_memslot(kvm, gfn);
  1694. mark_page_dirty_in_slot(memslot, gfn);
  1695. }
  1696. EXPORT_SYMBOL_GPL(mark_page_dirty);
  1697. void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
  1698. {
  1699. struct kvm_memory_slot *memslot;
  1700. memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
  1701. mark_page_dirty_in_slot(memslot, gfn);
  1702. }
  1703. EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
  1704. void kvm_sigset_activate(struct kvm_vcpu *vcpu)
  1705. {
  1706. if (!vcpu->sigset_active)
  1707. return;
1708. /*
1709. * This does a lockless modification of ->real_blocked, which is fine
1710. * because only current can change ->real_blocked and all readers of
1711. * ->real_blocked don't care as long as ->real_blocked is always a subset
1712. * of ->blocked.
1713. */
  1714. sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
  1715. }
  1716. void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
  1717. {
  1718. if (!vcpu->sigset_active)
  1719. return;
  1720. sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
  1721. sigemptyset(&current->real_blocked);
  1722. }
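/*
 * Adaptive halt polling: grow_halt_poll_ns()/shrink_halt_poll_ns() scale the
 * per-vCPU poll window by the halt_poll_ns_grow/halt_poll_ns_shrink module
 * parameters (with halt_poll_ns as the upper bound), and kvm_vcpu_block()
 * picks one of them based on how long the previous halt actually lasted.
 */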
  1723. static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
  1724. {
  1725. unsigned int old, val, grow;
  1726. old = val = vcpu->halt_poll_ns;
  1727. grow = READ_ONCE(halt_poll_ns_grow);
  1728. /* 10us base */
  1729. if (val == 0 && grow)
  1730. val = 10000;
  1731. else
  1732. val *= grow;
  1733. if (val > halt_poll_ns)
  1734. val = halt_poll_ns;
  1735. vcpu->halt_poll_ns = val;
  1736. trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
  1737. }
  1738. static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
  1739. {
  1740. unsigned int old, val, shrink;
  1741. old = val = vcpu->halt_poll_ns;
  1742. shrink = READ_ONCE(halt_poll_ns_shrink);
  1743. if (shrink == 0)
  1744. val = 0;
  1745. else
  1746. val /= shrink;
  1747. vcpu->halt_poll_ns = val;
  1748. trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
  1749. }
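/*
 * Returns 0 if the vCPU should keep blocking, or -EINTR if it must stop:
 * it is runnable again, it has a pending timer, or the host thread has a
 * signal pending.  KVM_REQ_UNHALT is only set for the runnable case.
 */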
  1750. static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
  1751. {
  1752. if (kvm_arch_vcpu_runnable(vcpu)) {
  1753. kvm_make_request(KVM_REQ_UNHALT, vcpu);
  1754. return -EINTR;
  1755. }
  1756. if (kvm_cpu_has_pending_timer(vcpu))
  1757. return -EINTR;
  1758. if (signal_pending(current))
  1759. return -EINTR;
  1760. return 0;
  1761. }
  1762. /*
  1763. * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  1764. */
  1765. void kvm_vcpu_block(struct kvm_vcpu *vcpu)
  1766. {
  1767. ktime_t start, cur;
  1768. DECLARE_SWAITQUEUE(wait);
  1769. bool waited = false;
  1770. u64 block_ns;
  1771. start = cur = ktime_get();
  1772. if (vcpu->halt_poll_ns) {
  1773. ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
  1774. ++vcpu->stat.halt_attempted_poll;
  1775. do {
  1776. /*
  1777. * This sets KVM_REQ_UNHALT if an interrupt
  1778. * arrives.
  1779. */
  1780. if (kvm_vcpu_check_block(vcpu) < 0) {
  1781. ++vcpu->stat.halt_successful_poll;
  1782. if (!vcpu_valid_wakeup(vcpu))
  1783. ++vcpu->stat.halt_poll_invalid;
  1784. goto out;
  1785. }
  1786. cur = ktime_get();
  1787. } while (single_task_running() && ktime_before(cur, stop));
  1788. }
  1789. kvm_arch_vcpu_blocking(vcpu);
  1790. for (;;) {
  1791. prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
  1792. if (kvm_vcpu_check_block(vcpu) < 0)
  1793. break;
  1794. waited = true;
  1795. schedule();
  1796. }
  1797. finish_swait(&vcpu->wq, &wait);
  1798. cur = ktime_get();
  1799. kvm_arch_vcpu_unblocking(vcpu);
  1800. out:
  1801. block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
  1802. if (!vcpu_valid_wakeup(vcpu))
  1803. shrink_halt_poll_ns(vcpu);
  1804. else if (halt_poll_ns) {
  1805. if (block_ns <= vcpu->halt_poll_ns)
  1806. ;
  1807. /* we had a long block, shrink polling */
  1808. else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
  1809. shrink_halt_poll_ns(vcpu);
  1810. /* we had a short halt and our poll time is too small */
  1811. else if (vcpu->halt_poll_ns < halt_poll_ns &&
  1812. block_ns < halt_poll_ns)
  1813. grow_halt_poll_ns(vcpu);
  1814. } else
  1815. vcpu->halt_poll_ns = 0;
  1816. trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
  1817. kvm_arch_vcpu_block_finish(vcpu);
  1818. }
  1819. EXPORT_SYMBOL_GPL(kvm_vcpu_block);
  1820. bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
  1821. {
  1822. struct swait_queue_head *wqp;
  1823. wqp = kvm_arch_vcpu_wq(vcpu);
  1824. if (swq_has_sleeper(wqp)) {
  1825. swake_up(wqp);
  1826. ++vcpu->stat.halt_wakeup;
  1827. return true;
  1828. }
  1829. return false;
  1830. }
  1831. EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
  1832. #ifndef CONFIG_S390
  1833. /*
  1834. * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
  1835. */
  1836. void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
  1837. {
  1838. int me;
  1839. int cpu = vcpu->cpu;
  1840. if (kvm_vcpu_wake_up(vcpu))
  1841. return;
  1842. me = get_cpu();
  1843. if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
  1844. if (kvm_arch_vcpu_should_kick(vcpu))
  1845. smp_send_reschedule(cpu);
  1846. put_cpu();
  1847. }
  1848. EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
  1849. #endif /* !CONFIG_S390 */
  1850. int kvm_vcpu_yield_to(struct kvm_vcpu *target)
  1851. {
  1852. struct pid *pid;
  1853. struct task_struct *task = NULL;
  1854. int ret = 0;
  1855. rcu_read_lock();
  1856. pid = rcu_dereference(target->pid);
  1857. if (pid)
  1858. task = get_pid_task(pid, PIDTYPE_PID);
  1859. rcu_read_unlock();
  1860. if (!task)
  1861. return ret;
  1862. ret = yield_to(task, 1);
  1863. put_task_struct(task);
  1864. return ret;
  1865. }
  1866. EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
1867. /*
1868. * Helper that checks whether a VCPU is eligible for directed yield.
1869. * The most eligible candidate to yield to is decided by the following
1870. * heuristics:
1871. *
1872. * (a) A VCPU which has not done a PL-exit or had CPU relax intercepted
1873. * recently (a preempted lock holder), indicated by @in_spin_loop; this is
1874. * set at the beginning and cleared at the end of the interception/PLE handler.
1875. *
1876. * (b) A VCPU which did a PL-exit / had CPU relax intercepted but did not get
1877. * a chance last time (it has mostly become eligible now since we probably
1878. * yielded to the lock holder in the last iteration); this is done by toggling
1879. * @dy_eligible each time a VCPU is checked for eligibility.
1880. *
1881. * Yielding to a recently PL-exited/CPU-relax-intercepted VCPU before yielding
1882. * to the preempted lock holder could result in wrong VCPU selection and CPU
1883. * burning. Giving priority to a potential lock holder increases lock progress.
1884. *
1885. * Since the algorithm is based on heuristics, accessing another VCPU's data
1886. * without locking does no harm. It may result in trying to yield to the same
1887. * VCPU, failing, and continuing with the next VCPU, and so on.
1888. */
  1889. static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
  1890. {
  1891. #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
  1892. bool eligible;
  1893. eligible = !vcpu->spin_loop.in_spin_loop ||
  1894. vcpu->spin_loop.dy_eligible;
  1895. if (vcpu->spin_loop.in_spin_loop)
  1896. kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
  1897. return eligible;
  1898. #else
  1899. return true;
  1900. #endif
  1901. }
  1902. void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
  1903. {
  1904. struct kvm *kvm = me->kvm;
  1905. struct kvm_vcpu *vcpu;
  1906. int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
  1907. int yielded = 0;
  1908. int try = 3;
  1909. int pass;
  1910. int i;
  1911. kvm_vcpu_set_in_spin_loop(me, true);
  1912. /*
  1913. * We boost the priority of a VCPU that is runnable but not
  1914. * currently running, because it got preempted by something
  1915. * else and called schedule in __vcpu_run. Hopefully that
  1916. * VCPU is holding the lock that we need and will release it.
  1917. * We approximate round-robin by starting at the last boosted VCPU.
  1918. */
  1919. for (pass = 0; pass < 2 && !yielded && try; pass++) {
  1920. kvm_for_each_vcpu(i, vcpu, kvm) {
  1921. if (!pass && i <= last_boosted_vcpu) {
  1922. i = last_boosted_vcpu;
  1923. continue;
  1924. } else if (pass && i > last_boosted_vcpu)
  1925. break;
  1926. if (!READ_ONCE(vcpu->preempted))
  1927. continue;
  1928. if (vcpu == me)
  1929. continue;
  1930. if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
  1931. continue;
  1932. if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
  1933. continue;
  1934. if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
  1935. continue;
  1936. yielded = kvm_vcpu_yield_to(vcpu);
  1937. if (yielded > 0) {
  1938. kvm->last_boosted_vcpu = i;
  1939. break;
  1940. } else if (yielded < 0) {
  1941. try--;
  1942. if (!try)
  1943. break;
  1944. }
  1945. }
  1946. }
  1947. kvm_vcpu_set_in_spin_loop(me, false);
  1948. /* Ensure vcpu is not eligible during next spinloop */
  1949. kvm_vcpu_set_dy_eligible(me, false);
  1950. }
  1951. EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
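/*
 * Fault handler for mmap() of a vcpu fd: page offset 0 is the kvm_run
 * structure, optionally followed by the x86 PIO data page and the coalesced
 * MMIO ring page, depending on the kernel configuration.
 */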
  1952. static int kvm_vcpu_fault(struct vm_fault *vmf)
  1953. {
  1954. struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
  1955. struct page *page;
  1956. if (vmf->pgoff == 0)
  1957. page = virt_to_page(vcpu->run);
  1958. #ifdef CONFIG_X86
  1959. else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
  1960. page = virt_to_page(vcpu->arch.pio_data);
  1961. #endif
  1962. #ifdef CONFIG_KVM_MMIO
  1963. else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
  1964. page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
  1965. #endif
  1966. else
  1967. return kvm_arch_vcpu_fault(vcpu, vmf);
  1968. get_page(page);
  1969. vmf->page = page;
  1970. return 0;
  1971. }
  1972. static const struct vm_operations_struct kvm_vcpu_vm_ops = {
  1973. .fault = kvm_vcpu_fault,
  1974. };
  1975. static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
  1976. {
  1977. vma->vm_ops = &kvm_vcpu_vm_ops;
  1978. return 0;
  1979. }
  1980. static int kvm_vcpu_release(struct inode *inode, struct file *filp)
  1981. {
  1982. struct kvm_vcpu *vcpu = filp->private_data;
  1983. debugfs_remove_recursive(vcpu->debugfs_dentry);
  1984. kvm_put_kvm(vcpu->kvm);
  1985. return 0;
  1986. }
  1987. static struct file_operations kvm_vcpu_fops = {
  1988. .release = kvm_vcpu_release,
  1989. .unlocked_ioctl = kvm_vcpu_ioctl,
  1990. #ifdef CONFIG_KVM_COMPAT
  1991. .compat_ioctl = kvm_vcpu_compat_ioctl,
  1992. #endif
  1993. .mmap = kvm_vcpu_mmap,
  1994. .llseek = noop_llseek,
  1995. };
  1996. /*
  1997. * Allocates an inode for the vcpu.
  1998. */
  1999. static int create_vcpu_fd(struct kvm_vcpu *vcpu)
  2000. {
  2001. char name[8 + 1 + ITOA_MAX_LEN + 1];
  2002. snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
  2003. return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
  2004. }
  2005. static int kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
  2006. {
  2007. char dir_name[ITOA_MAX_LEN * 2];
  2008. int ret;
  2009. if (!kvm_arch_has_vcpu_debugfs())
  2010. return 0;
  2011. if (!debugfs_initialized())
  2012. return 0;
  2013. snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
  2014. vcpu->debugfs_dentry = debugfs_create_dir(dir_name,
  2015. vcpu->kvm->debugfs_dentry);
  2016. if (!vcpu->debugfs_dentry)
  2017. return -ENOMEM;
  2018. ret = kvm_arch_create_vcpu_debugfs(vcpu);
  2019. if (ret < 0) {
  2020. debugfs_remove_recursive(vcpu->debugfs_dentry);
  2021. return ret;
  2022. }
  2023. return 0;
  2024. }
  2025. /*
  2026. * Creates some virtual cpus. Good luck creating more than one.
  2027. */
  2028. static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
  2029. {
  2030. int r;
  2031. struct kvm_vcpu *vcpu;
  2032. if (id >= KVM_MAX_VCPU_ID)
  2033. return -EINVAL;
  2034. mutex_lock(&kvm->lock);
  2035. if (kvm->created_vcpus == KVM_MAX_VCPUS) {
  2036. mutex_unlock(&kvm->lock);
  2037. return -EINVAL;
  2038. }
  2039. kvm->created_vcpus++;
  2040. mutex_unlock(&kvm->lock);
  2041. vcpu = kvm_arch_vcpu_create(kvm, id);
  2042. if (IS_ERR(vcpu)) {
  2043. r = PTR_ERR(vcpu);
  2044. goto vcpu_decrement;
  2045. }
  2046. preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
  2047. r = kvm_arch_vcpu_setup(vcpu);
  2048. if (r)
  2049. goto vcpu_destroy;
  2050. r = kvm_create_vcpu_debugfs(vcpu);
  2051. if (r)
  2052. goto vcpu_destroy;
  2053. mutex_lock(&kvm->lock);
  2054. if (kvm_get_vcpu_by_id(kvm, id)) {
  2055. r = -EEXIST;
  2056. goto unlock_vcpu_destroy;
  2057. }
  2058. BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
  2059. /* Now it's all set up, let userspace reach it */
  2060. kvm_get_kvm(kvm);
  2061. r = create_vcpu_fd(vcpu);
  2062. if (r < 0) {
  2063. kvm_put_kvm(kvm);
  2064. goto unlock_vcpu_destroy;
  2065. }
  2066. kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
2067. /*
2068. * Pairs with smp_rmb() in kvm_get_vcpu. Write the kvm->vcpus
2069. * entry before the incremented kvm->online_vcpus value is visible.
2070. */
  2071. smp_wmb();
  2072. atomic_inc(&kvm->online_vcpus);
  2073. mutex_unlock(&kvm->lock);
  2074. kvm_arch_vcpu_postcreate(vcpu);
  2075. return r;
  2076. unlock_vcpu_destroy:
  2077. mutex_unlock(&kvm->lock);
  2078. debugfs_remove_recursive(vcpu->debugfs_dentry);
  2079. vcpu_destroy:
  2080. kvm_arch_vcpu_destroy(vcpu);
  2081. vcpu_decrement:
  2082. mutex_lock(&kvm->lock);
  2083. kvm->created_vcpus--;
  2084. mutex_unlock(&kvm->lock);
  2085. return r;
  2086. }
  2087. static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
  2088. {
  2089. if (sigset) {
  2090. sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
  2091. vcpu->sigset_active = 1;
  2092. vcpu->sigset = *sigset;
  2093. } else
  2094. vcpu->sigset_active = 0;
  2095. return 0;
  2096. }
  2097. static long kvm_vcpu_ioctl(struct file *filp,
  2098. unsigned int ioctl, unsigned long arg)
  2099. {
  2100. struct kvm_vcpu *vcpu = filp->private_data;
  2101. void __user *argp = (void __user *)arg;
  2102. int r;
  2103. struct kvm_fpu *fpu = NULL;
  2104. struct kvm_sregs *kvm_sregs = NULL;
  2105. if (vcpu->kvm->mm != current->mm)
  2106. return -EIO;
  2107. if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
  2108. return -EINVAL;
  2109. /*
  2110. * Some architectures have vcpu ioctls that are asynchronous to vcpu
  2111. * execution; mutex_lock() would break them.
  2112. */
  2113. r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
  2114. if (r != -ENOIOCTLCMD)
  2115. return r;
  2116. if (mutex_lock_killable(&vcpu->mutex))
  2117. return -EINTR;
  2118. switch (ioctl) {
  2119. case KVM_RUN: {
  2120. struct pid *oldpid;
  2121. r = -EINVAL;
  2122. if (arg)
  2123. goto out;
  2124. oldpid = rcu_access_pointer(vcpu->pid);
  2125. if (unlikely(oldpid != current->pids[PIDTYPE_PID].pid)) {
  2126. /* The thread running this VCPU changed. */
  2127. struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
  2128. rcu_assign_pointer(vcpu->pid, newpid);
  2129. if (oldpid)
  2130. synchronize_rcu();
  2131. put_pid(oldpid);
  2132. }
  2133. r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
  2134. trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
  2135. break;
  2136. }
  2137. case KVM_GET_REGS: {
  2138. struct kvm_regs *kvm_regs;
  2139. r = -ENOMEM;
  2140. kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
  2141. if (!kvm_regs)
  2142. goto out;
  2143. r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
  2144. if (r)
  2145. goto out_free1;
  2146. r = -EFAULT;
  2147. if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
  2148. goto out_free1;
  2149. r = 0;
  2150. out_free1:
  2151. kfree(kvm_regs);
  2152. break;
  2153. }
  2154. case KVM_SET_REGS: {
  2155. struct kvm_regs *kvm_regs;
  2156. r = -ENOMEM;
  2157. kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
  2158. if (IS_ERR(kvm_regs)) {
  2159. r = PTR_ERR(kvm_regs);
  2160. goto out;
  2161. }
  2162. r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
  2163. kfree(kvm_regs);
  2164. break;
  2165. }
  2166. case KVM_GET_SREGS: {
  2167. kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
  2168. r = -ENOMEM;
  2169. if (!kvm_sregs)
  2170. goto out;
  2171. r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
  2172. if (r)
  2173. goto out;
  2174. r = -EFAULT;
  2175. if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
  2176. goto out;
  2177. r = 0;
  2178. break;
  2179. }
  2180. case KVM_SET_SREGS: {
  2181. kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
  2182. if (IS_ERR(kvm_sregs)) {
  2183. r = PTR_ERR(kvm_sregs);
  2184. kvm_sregs = NULL;
  2185. goto out;
  2186. }
  2187. r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
  2188. break;
  2189. }
  2190. case KVM_GET_MP_STATE: {
  2191. struct kvm_mp_state mp_state;
  2192. r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
  2193. if (r)
  2194. goto out;
  2195. r = -EFAULT;
  2196. if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
  2197. goto out;
  2198. r = 0;
  2199. break;
  2200. }
  2201. case KVM_SET_MP_STATE: {
  2202. struct kvm_mp_state mp_state;
  2203. r = -EFAULT;
  2204. if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
  2205. goto out;
  2206. r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
  2207. break;
  2208. }
  2209. case KVM_TRANSLATE: {
  2210. struct kvm_translation tr;
  2211. r = -EFAULT;
  2212. if (copy_from_user(&tr, argp, sizeof(tr)))
  2213. goto out;
  2214. r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
  2215. if (r)
  2216. goto out;
  2217. r = -EFAULT;
  2218. if (copy_to_user(argp, &tr, sizeof(tr)))
  2219. goto out;
  2220. r = 0;
  2221. break;
  2222. }
  2223. case KVM_SET_GUEST_DEBUG: {
  2224. struct kvm_guest_debug dbg;
  2225. r = -EFAULT;
  2226. if (copy_from_user(&dbg, argp, sizeof(dbg)))
  2227. goto out;
  2228. r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
  2229. break;
  2230. }
  2231. case KVM_SET_SIGNAL_MASK: {
  2232. struct kvm_signal_mask __user *sigmask_arg = argp;
  2233. struct kvm_signal_mask kvm_sigmask;
  2234. sigset_t sigset, *p;
  2235. p = NULL;
  2236. if (argp) {
  2237. r = -EFAULT;
  2238. if (copy_from_user(&kvm_sigmask, argp,
  2239. sizeof(kvm_sigmask)))
  2240. goto out;
  2241. r = -EINVAL;
  2242. if (kvm_sigmask.len != sizeof(sigset))
  2243. goto out;
  2244. r = -EFAULT;
  2245. if (copy_from_user(&sigset, sigmask_arg->sigset,
  2246. sizeof(sigset)))
  2247. goto out;
  2248. p = &sigset;
  2249. }
  2250. r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
  2251. break;
  2252. }
  2253. case KVM_GET_FPU: {
  2254. fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
  2255. r = -ENOMEM;
  2256. if (!fpu)
  2257. goto out;
  2258. r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
  2259. if (r)
  2260. goto out;
  2261. r = -EFAULT;
  2262. if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
  2263. goto out;
  2264. r = 0;
  2265. break;
  2266. }
  2267. case KVM_SET_FPU: {
  2268. fpu = memdup_user(argp, sizeof(*fpu));
  2269. if (IS_ERR(fpu)) {
  2270. r = PTR_ERR(fpu);
  2271. fpu = NULL;
  2272. goto out;
  2273. }
  2274. r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
  2275. break;
  2276. }
  2277. default:
  2278. r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
  2279. }
  2280. out:
  2281. mutex_unlock(&vcpu->mutex);
  2282. kfree(fpu);
  2283. kfree(kvm_sregs);
  2284. return r;
  2285. }
  2286. #ifdef CONFIG_KVM_COMPAT
  2287. static long kvm_vcpu_compat_ioctl(struct file *filp,
  2288. unsigned int ioctl, unsigned long arg)
  2289. {
  2290. struct kvm_vcpu *vcpu = filp->private_data;
  2291. void __user *argp = compat_ptr(arg);
  2292. int r;
  2293. if (vcpu->kvm->mm != current->mm)
  2294. return -EIO;
  2295. switch (ioctl) {
  2296. case KVM_SET_SIGNAL_MASK: {
  2297. struct kvm_signal_mask __user *sigmask_arg = argp;
  2298. struct kvm_signal_mask kvm_sigmask;
  2299. sigset_t sigset;
  2300. if (argp) {
  2301. r = -EFAULT;
  2302. if (copy_from_user(&kvm_sigmask, argp,
  2303. sizeof(kvm_sigmask)))
  2304. goto out;
  2305. r = -EINVAL;
  2306. if (kvm_sigmask.len != sizeof(compat_sigset_t))
  2307. goto out;
  2308. r = -EFAULT;
  2309. if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset))
  2310. goto out;
  2311. r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
  2312. } else
  2313. r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
  2314. break;
  2315. }
  2316. default:
  2317. r = kvm_vcpu_ioctl(filp, ioctl, arg);
  2318. }
  2319. out:
  2320. return r;
  2321. }
  2322. #endif
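/*
 * Copy a struct kvm_device_attr from userspace and hand it to the device's
 * get/set/has accessor; -EPERM is returned when the device does not
 * implement the requested accessor.
 */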
  2323. static int kvm_device_ioctl_attr(struct kvm_device *dev,
  2324. int (*accessor)(struct kvm_device *dev,
  2325. struct kvm_device_attr *attr),
  2326. unsigned long arg)
  2327. {
  2328. struct kvm_device_attr attr;
  2329. if (!accessor)
  2330. return -EPERM;
  2331. if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
  2332. return -EFAULT;
  2333. return accessor(dev, &attr);
  2334. }
  2335. static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
  2336. unsigned long arg)
  2337. {
  2338. struct kvm_device *dev = filp->private_data;
  2339. switch (ioctl) {
  2340. case KVM_SET_DEVICE_ATTR:
  2341. return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
  2342. case KVM_GET_DEVICE_ATTR:
  2343. return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
  2344. case KVM_HAS_DEVICE_ATTR:
  2345. return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
  2346. default:
  2347. if (dev->ops->ioctl)
  2348. return dev->ops->ioctl(dev, ioctl, arg);
  2349. return -ENOTTY;
  2350. }
  2351. }
  2352. static int kvm_device_release(struct inode *inode, struct file *filp)
  2353. {
  2354. struct kvm_device *dev = filp->private_data;
  2355. struct kvm *kvm = dev->kvm;
  2356. kvm_put_kvm(kvm);
  2357. return 0;
  2358. }
  2359. static const struct file_operations kvm_device_fops = {
  2360. .unlocked_ioctl = kvm_device_ioctl,
  2361. #ifdef CONFIG_KVM_COMPAT
  2362. .compat_ioctl = kvm_device_ioctl,
  2363. #endif
  2364. .release = kvm_device_release,
  2365. };
  2366. struct kvm_device *kvm_device_from_filp(struct file *filp)
  2367. {
  2368. if (filp->f_op != &kvm_device_fops)
  2369. return NULL;
  2370. return filp->private_data;
  2371. }
  2372. static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
  2373. #ifdef CONFIG_KVM_MPIC
  2374. [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
  2375. [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
  2376. #endif
  2377. };
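/*
 * kvm_device_ops_table maps KVM_DEV_TYPE_* identifiers to their ops.  The
 * MPIC entries above are registered statically when CONFIG_KVM_MPIC is set;
 * the remaining entries are filled at runtime via kvm_register_device_ops().
 */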
  2378. int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
  2379. {
  2380. if (type >= ARRAY_SIZE(kvm_device_ops_table))
  2381. return -ENOSPC;
  2382. if (kvm_device_ops_table[type] != NULL)
  2383. return -EEXIST;
  2384. kvm_device_ops_table[type] = ops;
  2385. return 0;
  2386. }
  2387. void kvm_unregister_device_ops(u32 type)
  2388. {
  2389. if (kvm_device_ops_table[type] != NULL)
  2390. kvm_device_ops_table[type] = NULL;
  2391. }
  2392. static int kvm_ioctl_create_device(struct kvm *kvm,
  2393. struct kvm_create_device *cd)
  2394. {
  2395. struct kvm_device_ops *ops = NULL;
  2396. struct kvm_device *dev;
  2397. bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
  2398. int ret;
  2399. if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
  2400. return -ENODEV;
  2401. ops = kvm_device_ops_table[cd->type];
  2402. if (ops == NULL)
  2403. return -ENODEV;
  2404. if (test)
  2405. return 0;
  2406. dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  2407. if (!dev)
  2408. return -ENOMEM;
  2409. dev->ops = ops;
  2410. dev->kvm = kvm;
  2411. mutex_lock(&kvm->lock);
  2412. ret = ops->create(dev, cd->type);
  2413. if (ret < 0) {
  2414. mutex_unlock(&kvm->lock);
  2415. kfree(dev);
  2416. return ret;
  2417. }
  2418. list_add(&dev->vm_node, &kvm->devices);
  2419. mutex_unlock(&kvm->lock);
  2420. if (ops->init)
  2421. ops->init(dev);
  2422. ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
  2423. if (ret < 0) {
  2424. mutex_lock(&kvm->lock);
  2425. list_del(&dev->vm_node);
  2426. mutex_unlock(&kvm->lock);
  2427. ops->destroy(dev);
  2428. return ret;
  2429. }
  2430. kvm_get_kvm(kvm);
  2431. cd->fd = ret;
  2432. return 0;
  2433. }
  2434. static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
  2435. {
  2436. switch (arg) {
  2437. case KVM_CAP_USER_MEMORY:
  2438. case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
  2439. case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
  2440. case KVM_CAP_INTERNAL_ERROR_DATA:
  2441. #ifdef CONFIG_HAVE_KVM_MSI
  2442. case KVM_CAP_SIGNAL_MSI:
  2443. #endif
  2444. #ifdef CONFIG_HAVE_KVM_IRQFD
  2445. case KVM_CAP_IRQFD:
  2446. case KVM_CAP_IRQFD_RESAMPLE:
  2447. #endif
  2448. case KVM_CAP_IOEVENTFD_ANY_LENGTH:
  2449. case KVM_CAP_CHECK_EXTENSION_VM:
  2450. return 1;
  2451. #ifdef CONFIG_KVM_MMIO
  2452. case KVM_CAP_COALESCED_MMIO:
  2453. return KVM_COALESCED_MMIO_PAGE_OFFSET;
  2454. #endif
  2455. #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
  2456. case KVM_CAP_IRQ_ROUTING:
  2457. return KVM_MAX_IRQ_ROUTES;
  2458. #endif
  2459. #if KVM_ADDRESS_SPACE_NUM > 1
  2460. case KVM_CAP_MULTI_ADDRESS_SPACE:
  2461. return KVM_ADDRESS_SPACE_NUM;
  2462. #endif
  2463. case KVM_CAP_MAX_VCPU_ID:
  2464. return KVM_MAX_VCPU_ID;
  2465. default:
  2466. break;
  2467. }
  2468. return kvm_vm_ioctl_check_extension(kvm, arg);
  2469. }
  2470. static long kvm_vm_ioctl(struct file *filp,
  2471. unsigned int ioctl, unsigned long arg)
  2472. {
  2473. struct kvm *kvm = filp->private_data;
  2474. void __user *argp = (void __user *)arg;
  2475. int r;
  2476. if (kvm->mm != current->mm)
  2477. return -EIO;
  2478. switch (ioctl) {
  2479. case KVM_CREATE_VCPU:
  2480. r = kvm_vm_ioctl_create_vcpu(kvm, arg);
  2481. break;
  2482. case KVM_SET_USER_MEMORY_REGION: {
  2483. struct kvm_userspace_memory_region kvm_userspace_mem;
  2484. r = -EFAULT;
  2485. if (copy_from_user(&kvm_userspace_mem, argp,
  2486. sizeof(kvm_userspace_mem)))
  2487. goto out;
  2488. r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
  2489. break;
  2490. }
  2491. case KVM_GET_DIRTY_LOG: {
  2492. struct kvm_dirty_log log;
  2493. r = -EFAULT;
  2494. if (copy_from_user(&log, argp, sizeof(log)))
  2495. goto out;
  2496. r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
  2497. break;
  2498. }
  2499. #ifdef CONFIG_KVM_MMIO
  2500. case KVM_REGISTER_COALESCED_MMIO: {
  2501. struct kvm_coalesced_mmio_zone zone;
  2502. r = -EFAULT;
  2503. if (copy_from_user(&zone, argp, sizeof(zone)))
  2504. goto out;
  2505. r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
  2506. break;
  2507. }
  2508. case KVM_UNREGISTER_COALESCED_MMIO: {
  2509. struct kvm_coalesced_mmio_zone zone;
  2510. r = -EFAULT;
  2511. if (copy_from_user(&zone, argp, sizeof(zone)))
  2512. goto out;
  2513. r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
  2514. break;
  2515. }
  2516. #endif
  2517. case KVM_IRQFD: {
  2518. struct kvm_irqfd data;
  2519. r = -EFAULT;
  2520. if (copy_from_user(&data, argp, sizeof(data)))
  2521. goto out;
  2522. r = kvm_irqfd(kvm, &data);
  2523. break;
  2524. }
  2525. case KVM_IOEVENTFD: {
  2526. struct kvm_ioeventfd data;
  2527. r = -EFAULT;
  2528. if (copy_from_user(&data, argp, sizeof(data)))
  2529. goto out;
  2530. r = kvm_ioeventfd(kvm, &data);
  2531. break;
  2532. }
  2533. #ifdef CONFIG_HAVE_KVM_MSI
  2534. case KVM_SIGNAL_MSI: {
  2535. struct kvm_msi msi;
  2536. r = -EFAULT;
  2537. if (copy_from_user(&msi, argp, sizeof(msi)))
  2538. goto out;
  2539. r = kvm_send_userspace_msi(kvm, &msi);
  2540. break;
  2541. }
  2542. #endif
  2543. #ifdef __KVM_HAVE_IRQ_LINE
  2544. case KVM_IRQ_LINE_STATUS:
  2545. case KVM_IRQ_LINE: {
  2546. struct kvm_irq_level irq_event;
  2547. r = -EFAULT;
  2548. if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
  2549. goto out;
  2550. r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
  2551. ioctl == KVM_IRQ_LINE_STATUS);
  2552. if (r)
  2553. goto out;
  2554. r = -EFAULT;
  2555. if (ioctl == KVM_IRQ_LINE_STATUS) {
  2556. if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
  2557. goto out;
  2558. }
  2559. r = 0;
  2560. break;
  2561. }
  2562. #endif
  2563. #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
  2564. case KVM_SET_GSI_ROUTING: {
  2565. struct kvm_irq_routing routing;
  2566. struct kvm_irq_routing __user *urouting;
  2567. struct kvm_irq_routing_entry *entries = NULL;
  2568. r = -EFAULT;
  2569. if (copy_from_user(&routing, argp, sizeof(routing)))
  2570. goto out;
  2571. r = -EINVAL;
  2572. if (!kvm_arch_can_set_irq_routing(kvm))
  2573. goto out;
  2574. if (routing.nr > KVM_MAX_IRQ_ROUTES)
  2575. goto out;
  2576. if (routing.flags)
  2577. goto out;
  2578. if (routing.nr) {
  2579. r = -ENOMEM;
  2580. entries = vmalloc(array_size(sizeof(*entries),
  2581. routing.nr));
  2582. if (!entries)
  2583. goto out;
  2584. r = -EFAULT;
  2585. urouting = argp;
  2586. if (copy_from_user(entries, urouting->entries,
  2587. routing.nr * sizeof(*entries)))
  2588. goto out_free_irq_routing;
  2589. }
  2590. r = kvm_set_irq_routing(kvm, entries, routing.nr,
  2591. routing.flags);
  2592. out_free_irq_routing:
  2593. vfree(entries);
  2594. break;
  2595. }
  2596. #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
  2597. case KVM_CREATE_DEVICE: {
  2598. struct kvm_create_device cd;
  2599. r = -EFAULT;
  2600. if (copy_from_user(&cd, argp, sizeof(cd)))
  2601. goto out;
  2602. r = kvm_ioctl_create_device(kvm, &cd);
  2603. if (r)
  2604. goto out;
  2605. r = -EFAULT;
  2606. if (copy_to_user(argp, &cd, sizeof(cd)))
  2607. goto out;
  2608. r = 0;
  2609. break;
  2610. }
  2611. case KVM_CHECK_EXTENSION:
  2612. r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
  2613. break;
  2614. default:
  2615. r = kvm_arch_vm_ioctl(filp, ioctl, arg);
  2616. }
  2617. out:
  2618. return r;
  2619. }
  2620. #ifdef CONFIG_KVM_COMPAT
  2621. struct compat_kvm_dirty_log {
  2622. __u32 slot;
  2623. __u32 padding1;
  2624. union {
  2625. compat_uptr_t dirty_bitmap; /* one bit per page */
  2626. __u64 padding2;
  2627. };
  2628. };
  2629. static long kvm_vm_compat_ioctl(struct file *filp,
  2630. unsigned int ioctl, unsigned long arg)
  2631. {
  2632. struct kvm *kvm = filp->private_data;
  2633. int r;
  2634. if (kvm->mm != current->mm)
  2635. return -EIO;
  2636. switch (ioctl) {
  2637. case KVM_GET_DIRTY_LOG: {
  2638. struct compat_kvm_dirty_log compat_log;
  2639. struct kvm_dirty_log log;
  2640. if (copy_from_user(&compat_log, (void __user *)arg,
  2641. sizeof(compat_log)))
  2642. return -EFAULT;
  2643. log.slot = compat_log.slot;
  2644. log.padding1 = compat_log.padding1;
  2645. log.padding2 = compat_log.padding2;
  2646. log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
  2647. r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
  2648. break;
  2649. }
  2650. default:
  2651. r = kvm_vm_ioctl(filp, ioctl, arg);
  2652. }
  2653. return r;
  2654. }
  2655. #endif
  2656. static struct file_operations kvm_vm_fops = {
  2657. .release = kvm_vm_release,
  2658. .unlocked_ioctl = kvm_vm_ioctl,
  2659. #ifdef CONFIG_KVM_COMPAT
  2660. .compat_ioctl = kvm_vm_compat_ioctl,
  2661. #endif
  2662. .llseek = noop_llseek,
  2663. };
  2664. static int kvm_dev_ioctl_create_vm(unsigned long type)
  2665. {
  2666. int r;
  2667. struct kvm *kvm;
  2668. struct file *file;
  2669. kvm = kvm_create_vm(type);
  2670. if (IS_ERR(kvm))
  2671. return PTR_ERR(kvm);
  2672. #ifdef CONFIG_KVM_MMIO
  2673. r = kvm_coalesced_mmio_init(kvm);
  2674. if (r < 0)
  2675. goto put_kvm;
  2676. #endif
  2677. r = get_unused_fd_flags(O_CLOEXEC);
  2678. if (r < 0)
  2679. goto put_kvm;
  2680. file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
  2681. if (IS_ERR(file)) {
  2682. put_unused_fd(r);
  2683. r = PTR_ERR(file);
  2684. goto put_kvm;
  2685. }
  2686. /*
  2687. * Don't call kvm_put_kvm anymore at this point; file->f_op is
  2688. * already set, with ->release() being kvm_vm_release(). In error
  2689. * cases it will be called by the final fput(file) and will take
  2690. * care of doing kvm_put_kvm(kvm).
  2691. */
  2692. if (kvm_create_vm_debugfs(kvm, r) < 0) {
  2693. put_unused_fd(r);
  2694. fput(file);
  2695. return -ENOMEM;
  2696. }
  2697. kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
  2698. fd_install(r, file);
  2699. return r;
  2700. put_kvm:
  2701. kvm_put_kvm(kvm);
  2702. return r;
  2703. }
  2704. static long kvm_dev_ioctl(struct file *filp,
  2705. unsigned int ioctl, unsigned long arg)
  2706. {
  2707. long r = -EINVAL;
  2708. switch (ioctl) {
  2709. case KVM_GET_API_VERSION:
  2710. if (arg)
  2711. goto out;
  2712. r = KVM_API_VERSION;
  2713. break;
  2714. case KVM_CREATE_VM:
  2715. r = kvm_dev_ioctl_create_vm(arg);
  2716. break;
  2717. case KVM_CHECK_EXTENSION:
  2718. r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
  2719. break;
  2720. case KVM_GET_VCPU_MMAP_SIZE:
  2721. if (arg)
  2722. goto out;
  2723. r = PAGE_SIZE; /* struct kvm_run */
  2724. #ifdef CONFIG_X86
  2725. r += PAGE_SIZE; /* pio data page */
  2726. #endif
  2727. #ifdef CONFIG_KVM_MMIO
  2728. r += PAGE_SIZE; /* coalesced mmio ring page */
  2729. #endif
  2730. break;
  2731. case KVM_TRACE_ENABLE:
  2732. case KVM_TRACE_PAUSE:
  2733. case KVM_TRACE_DISABLE:
  2734. r = -EOPNOTSUPP;
  2735. break;
  2736. default:
  2737. return kvm_arch_dev_ioctl(filp, ioctl, arg);
  2738. }
  2739. out:
  2740. return r;
  2741. }
  2742. static struct file_operations kvm_chardev_ops = {
  2743. .unlocked_ioctl = kvm_dev_ioctl,
  2744. .compat_ioctl = kvm_dev_ioctl,
  2745. .llseek = noop_llseek,
  2746. };
  2747. static struct miscdevice kvm_dev = {
  2748. KVM_MINOR,
  2749. "kvm",
  2750. &kvm_chardev_ops,
  2751. };
  2752. static void hardware_enable_nolock(void *junk)
  2753. {
  2754. int cpu = raw_smp_processor_id();
  2755. int r;
  2756. if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
  2757. return;
  2758. cpumask_set_cpu(cpu, cpus_hardware_enabled);
  2759. r = kvm_arch_hardware_enable();
  2760. if (r) {
  2761. cpumask_clear_cpu(cpu, cpus_hardware_enabled);
  2762. atomic_inc(&hardware_enable_failed);
  2763. pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
  2764. }
  2765. }
  2766. static int kvm_starting_cpu(unsigned int cpu)
  2767. {
  2768. raw_spin_lock(&kvm_count_lock);
  2769. if (kvm_usage_count)
  2770. hardware_enable_nolock(NULL);
  2771. raw_spin_unlock(&kvm_count_lock);
  2772. return 0;
  2773. }
  2774. static void hardware_disable_nolock(void *junk)
  2775. {
  2776. int cpu = raw_smp_processor_id();
  2777. if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
  2778. return;
  2779. cpumask_clear_cpu(cpu, cpus_hardware_enabled);
  2780. kvm_arch_hardware_disable();
  2781. }
  2782. static int kvm_dying_cpu(unsigned int cpu)
  2783. {
  2784. raw_spin_lock(&kvm_count_lock);
  2785. if (kvm_usage_count)
  2786. hardware_disable_nolock(NULL);
  2787. raw_spin_unlock(&kvm_count_lock);
  2788. return 0;
  2789. }
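/*
 * kvm_usage_count counts the users that currently need hardware
 * virtualization enabled.  hardware_enable_all() enables it on every online
 * CPU when the count goes from 0 to 1, and hardware_disable_all() disables
 * it again when the count drops back to 0; both run under kvm_count_lock.
 */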
  2790. static void hardware_disable_all_nolock(void)
  2791. {
  2792. BUG_ON(!kvm_usage_count);
  2793. kvm_usage_count--;
  2794. if (!kvm_usage_count)
  2795. on_each_cpu(hardware_disable_nolock, NULL, 1);
  2796. }
  2797. static void hardware_disable_all(void)
  2798. {
  2799. raw_spin_lock(&kvm_count_lock);
  2800. hardware_disable_all_nolock();
  2801. raw_spin_unlock(&kvm_count_lock);
  2802. }
  2803. static int hardware_enable_all(void)
  2804. {
  2805. int r = 0;
  2806. raw_spin_lock(&kvm_count_lock);
  2807. kvm_usage_count++;
  2808. if (kvm_usage_count == 1) {
  2809. atomic_set(&hardware_enable_failed, 0);
  2810. on_each_cpu(hardware_enable_nolock, NULL, 1);
  2811. if (atomic_read(&hardware_enable_failed)) {
  2812. hardware_disable_all_nolock();
  2813. r = -EBUSY;
  2814. }
  2815. }
  2816. raw_spin_unlock(&kvm_count_lock);
  2817. return r;
  2818. }
  2819. static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
  2820. void *v)
  2821. {
2822. /*
2823. * Some (well, at least mine) BIOSes hang on reboot if
2824. * in VMX root mode.
2825. *
2826. * And Intel TXT requires VMX to be off on all CPUs when the system shuts down.
2827. */
  2828. pr_info("kvm: exiting hardware virtualization\n");
  2829. kvm_rebooting = true;
  2830. on_each_cpu(hardware_disable_nolock, NULL, 1);
  2831. return NOTIFY_OK;
  2832. }
  2833. static struct notifier_block kvm_reboot_notifier = {
  2834. .notifier_call = kvm_reboot,
  2835. .priority = 0,
  2836. };
  2837. static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
  2838. {
  2839. int i;
  2840. for (i = 0; i < bus->dev_count; i++) {
  2841. struct kvm_io_device *pos = bus->range[i].dev;
  2842. kvm_iodevice_destructor(pos);
  2843. }
  2844. kfree(bus);
  2845. }
static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
                                 const struct kvm_io_range *r2)
{
        gpa_t addr1 = r1->addr;
        gpa_t addr2 = r2->addr;

        if (addr1 < addr2)
                return -1;

        /* If r2->len == 0, match the exact address.  If r2->len != 0,
         * accept any overlapping write.  Any order is acceptable for
         * overlapping ranges, because kvm_io_bus_get_first_dev ensures
         * we process all of them.
         */
        if (r2->len) {
                addr1 += r1->len;
                addr2 += r2->len;
        }

        if (addr1 > addr2)
                return 1;

        return 0;
}

static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
{
        return kvm_io_bus_cmp(p1, p2);
}

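/*
 * Find the index of the first range on the bus that compares equal to
 * (addr, len).  bsearch() may land on any matching entry, so walk backwards
 * until the previous entry no longer matches.
 */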
static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
                                    gpa_t addr, int len)
{
        struct kvm_io_range *range, key;
        int off;

        key = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
        };

        range = bsearch(&key, bus->range, bus->dev_count,
                        sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
        if (range == NULL)
                return -ENOENT;

        off = range - bus->range;

        while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
                off--;

        return off;
}

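/*
 * Offer the access to every matching device in turn, starting at the first
 * overlapping range; kvm_iodevice_write()/kvm_iodevice_read() return 0 when
 * a device claims the access, and -EOPNOTSUPP is returned if none does.
 */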
static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
                              struct kvm_io_range *range, const void *val)
{
        int idx;

        idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
        if (idx < 0)
                return -EOPNOTSUPP;

        while (idx < bus->dev_count &&
                kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
                if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
                                        range->len, val))
                        return idx;
                idx++;
        }

        return -EOPNOTSUPP;
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val)
{
        struct kvm_io_bus *bus;
        struct kvm_io_range range;
        int r;

        range = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
        };

        bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
        if (!bus)
                return -ENOMEM;
        r = __kvm_io_bus_write(vcpu, bus, &range, val);
        return r < 0 ? r : 0;
}

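/*
 * kvm_io_bus_write_cookie() lets a caller cache the bus index returned by an
 * earlier write and retry that device directly, e.g. (illustrative only,
 * placeholder variables):
 *
 *	cookie = kvm_io_bus_write_cookie(vcpu, KVM_MMIO_BUS, addr, len, val, -1);
 *	...
 *	kvm_io_bus_write_cookie(vcpu, KVM_MMIO_BUS, addr, len, val, cookie);
 *
 * If the cached index no longer matches, it silently falls back to the full
 * search and returns the fresh index (or a negative error).
 */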
/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
                            gpa_t addr, int len, const void *val, long cookie)
{
        struct kvm_io_bus *bus;
        struct kvm_io_range range;

        range = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
        };

        bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
        if (!bus)
                return -ENOMEM;

        /* First try the device referenced by cookie. */
        if ((cookie >= 0) && (cookie < bus->dev_count) &&
            (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
                if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
                                        val))
                        return cookie;

        /*
         * cookie contained garbage; fall back to search and return the
         * correct cookie value.
         */
        return __kvm_io_bus_write(vcpu, bus, &range, val);
}

static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
                             struct kvm_io_range *range, void *val)
{
        int idx;

        idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
        if (idx < 0)
                return -EOPNOTSUPP;

        while (idx < bus->dev_count &&
                kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
                if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
                                       range->len, val))
                        return idx;
                idx++;
        }

        return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_write);

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                    int len, void *val)
{
        struct kvm_io_bus *bus;
        struct kvm_io_range range;
        int r;

        range = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
        };

        bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
        if (!bus)
                return -ENOMEM;
        r = __kvm_io_bus_read(vcpu, bus, &range, val);
        return r < 0 ? r : 0;
}

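/*
 * Registration rebuilds the bus as a copy that is one range larger, inserts
 * the new range at its sorted position, publishes the copy with
 * rcu_assign_pointer() and frees the old bus only after
 * synchronize_srcu_expedited(), so concurrent readers never see a
 * half-updated array.  A typical caller (sketch, placeholder device name)
 * holds slots_lock around the call:
 *
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, &my_iodev);
 *	mutex_unlock(&kvm->slots_lock);
 */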
/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev)
{
        int i;
        struct kvm_io_bus *new_bus, *bus;
        struct kvm_io_range range;

        bus = kvm_get_bus(kvm, bus_idx);
        if (!bus)
                return -ENOMEM;

        /* exclude ioeventfd which is limited by maximum fd */
        if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
                return -ENOSPC;

        new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) *
                          sizeof(struct kvm_io_range)), GFP_KERNEL);
        if (!new_bus)
                return -ENOMEM;

        range = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
                .dev = dev,
        };

        for (i = 0; i < bus->dev_count; i++)
                if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
                        break;

        memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
        new_bus->dev_count++;
        new_bus->range[i] = range;
        memcpy(new_bus->range + i + 1, bus->range + i,
               (bus->dev_count - i) * sizeof(struct kvm_io_range));
        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu);
        kfree(bus);

        return 0;
}

/* Caller must hold slots_lock. */
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                               struct kvm_io_device *dev)
{
        int i;
        struct kvm_io_bus *new_bus, *bus;

        bus = kvm_get_bus(kvm, bus_idx);
        if (!bus)
                return;

        for (i = 0; i < bus->dev_count; i++)
                if (bus->range[i].dev == dev) {
                        break;
                }

        if (i == bus->dev_count)
                return;

        new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
                          sizeof(struct kvm_io_range)), GFP_KERNEL);
        if (!new_bus) {
                pr_err("kvm: failed to shrink bus, removing it completely\n");
                goto broken;
        }

        memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
        new_bus->dev_count--;
        memcpy(new_bus->range + i, bus->range + i + 1,
               (new_bus->dev_count - i) * sizeof(struct kvm_io_range));

broken:
        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu);
        kfree(bus);
        return;
}

struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                         gpa_t addr)
{
        struct kvm_io_bus *bus;
        int dev_idx, srcu_idx;
        struct kvm_io_device *iodev = NULL;

        srcu_idx = srcu_read_lock(&kvm->srcu);

        bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
        if (!bus)
                goto out_unlock;

        dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
        if (dev_idx < 0)
                goto out_unlock;

        iodev = bus->range[dev_idx].dev;

out_unlock:
        srcu_read_unlock(&kvm->srcu, srcu_idx);

        return iodev;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);

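/*
 * Per-VM debugfs statistics: each file's private data is a kvm_stat_data
 * pairing a kvm pointer with the offset of the counter inside struct kvm or
 * struct kvm_vcpu.  open() takes a reference on the VM so the counters stay
 * valid for as long as the file is held open.
 */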
static int kvm_debugfs_open(struct inode *inode, struct file *file,
                            int (*get)(void *, u64 *), int (*set)(void *, u64),
                            const char *fmt)
{
        struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
                                          inode->i_private;

        /* The debugfs files hold a reference to the kvm struct, which
         * is still valid when kvm_destroy_vm is called.
         * To avoid a race between open and removal of the debugfs
         * directory, test against the users count.
         */
        if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
                return -ENOENT;

        if (simple_attr_open(inode, file, get, set, fmt)) {
                kvm_put_kvm(stat_data->kvm);
                return -ENOMEM;
        }

        return 0;
}

static int kvm_debugfs_release(struct inode *inode, struct file *file)
{
        struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
                                          inode->i_private;

        simple_attr_release(inode, file);
        kvm_put_kvm(stat_data->kvm);

        return 0;
}

static int vm_stat_get_per_vm(void *data, u64 *val)
{
        struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;

        *val = *(ulong *)((void *)stat_data->kvm + stat_data->offset);

        return 0;
}

static int vm_stat_clear_per_vm(void *data, u64 val)
{
        struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;

        if (val)
                return -EINVAL;

        *(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0;

        return 0;
}

static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
{
        __simple_attr_check_format("%llu\n", 0ull);
        return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
                                vm_stat_clear_per_vm, "%llu\n");
}

static const struct file_operations vm_stat_get_per_vm_fops = {
        .owner   = THIS_MODULE,
        .open    = vm_stat_get_per_vm_open,
        .release = kvm_debugfs_release,
        .read    = simple_attr_read,
        .write   = simple_attr_write,
        .llseek  = no_llseek,
};

static int vcpu_stat_get_per_vm(void *data, u64 *val)
{
        int i;
        struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
        struct kvm_vcpu *vcpu;

        *val = 0;

        kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
                *val += *(u64 *)((void *)vcpu + stat_data->offset);

        return 0;
}

static int vcpu_stat_clear_per_vm(void *data, u64 val)
{
        int i;
        struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
        struct kvm_vcpu *vcpu;

        if (val)
                return -EINVAL;

        kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
                *(u64 *)((void *)vcpu + stat_data->offset) = 0;

        return 0;
}

static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
{
        __simple_attr_check_format("%llu\n", 0ull);
        return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
                                vcpu_stat_clear_per_vm, "%llu\n");
}

static const struct file_operations vcpu_stat_get_per_vm_fops = {
        .owner   = THIS_MODULE,
        .open    = vcpu_stat_get_per_vm_open,
        .release = kvm_debugfs_release,
        .read    = simple_attr_read,
        .write   = simple_attr_write,
        .llseek  = no_llseek,
};

static const struct file_operations *stat_fops_per_vm[] = {
        [KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
        [KVM_STAT_VM]   = &vm_stat_get_per_vm_fops,
};

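/*
 * Global statistics (the files directly under debugfs/kvm): walk vm_list
 * under kvm_lock and sum, or clear, the corresponding per-VM counter for
 * every VM in the system.
 */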
static int vm_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        struct kvm_stat_data stat_tmp = {.offset = offset};
        u64 tmp_val;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
                *val += tmp_val;
        }
        spin_unlock(&kvm_lock);
        return 0;
}

static int vm_stat_clear(void *_offset, u64 val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        struct kvm_stat_data stat_tmp = {.offset = offset};

        if (val)
                return -EINVAL;

        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vm_stat_clear_per_vm((void *)&stat_tmp, 0);
        }
        spin_unlock(&kvm_lock);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        struct kvm_stat_data stat_tmp = {.offset = offset};
        u64 tmp_val;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
                *val += tmp_val;
        }
        spin_unlock(&kvm_lock);
        return 0;
}

static int vcpu_stat_clear(void *_offset, u64 val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        struct kvm_stat_data stat_tmp = {.offset = offset};

        if (val)
                return -EINVAL;

        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
        }
        spin_unlock(&kvm_lock);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
                        "%llu\n");

static const struct file_operations *stat_fops[] = {
        [KVM_STAT_VCPU] = &vcpu_stat_fops,
        [KVM_STAT_VM]   = &vm_stat_fops,
};

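/*
 * Notify userspace via a KOBJ_CHANGE uevent on the kvm misc device whenever
 * a VM is created or destroyed; the event carries the lifetime counters
 * (CREATED, COUNT), the owning PID and, if available, the VM's debugfs
 * stats path.
 */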
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
{
        struct kobj_uevent_env *env;
        unsigned long long created, active;

        if (!kvm_dev.this_device || !kvm)
                return;

        spin_lock(&kvm_lock);
        if (type == KVM_EVENT_CREATE_VM) {
                kvm_createvm_count++;
                kvm_active_vms++;
        } else if (type == KVM_EVENT_DESTROY_VM) {
                kvm_active_vms--;
        }
        created = kvm_createvm_count;
        active = kvm_active_vms;
        spin_unlock(&kvm_lock);

        env = kzalloc(sizeof(*env), GFP_KERNEL);
        if (!env)
                return;

        add_uevent_var(env, "CREATED=%llu", created);
        add_uevent_var(env, "COUNT=%llu", active);

        if (type == KVM_EVENT_CREATE_VM) {
                add_uevent_var(env, "EVENT=create");
                kvm->userspace_pid = task_pid_nr(current);
        } else if (type == KVM_EVENT_DESTROY_VM) {
                add_uevent_var(env, "EVENT=destroy");
        }
        add_uevent_var(env, "PID=%d", kvm->userspace_pid);

        if (kvm->debugfs_dentry) {
                char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);

                if (p) {
                        tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
                        if (!IS_ERR(tmp))
                                add_uevent_var(env, "STATS_PATH=%s", tmp);
                        kfree(p);
                }
        }
        /* no need for checks, since we are adding at most 5 keys */
        env->envp[env->envp_idx++] = NULL;
        kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
        kfree(env);
}

static int kvm_init_debug(void)
{
        int r = -EEXIST;
        struct kvm_stats_debugfs_item *p;

        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
        if (kvm_debugfs_dir == NULL)
                goto out;

        kvm_debugfs_num_entries = 0;
        for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
                if (!debugfs_create_file(p->name, 0644, kvm_debugfs_dir,
                                         (void *)(long)p->offset,
                                         stat_fops[p->kind]))
                        goto out_dir;
        }

        return 0;

out_dir:
        debugfs_remove_recursive(kvm_debugfs_dir);
out:
        return r;
}

static int kvm_suspend(void)
{
        if (kvm_usage_count)
                hardware_disable_nolock(NULL);
        return 0;
}

static void kvm_resume(void)
{
        if (kvm_usage_count) {
                WARN_ON(raw_spin_is_locked(&kvm_count_lock));
                hardware_enable_nolock(NULL);
        }
}

static struct syscore_ops kvm_syscore_ops = {
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

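/*
 * Preempt notifier hooks: when a vCPU task is scheduled in or out,
 * kvm_arch_vcpu_load()/kvm_arch_vcpu_put() migrate its state onto or off the
 * physical CPU.  vcpu->preempted records whether the task was still runnable
 * when it lost the CPU, which KVM uses when picking directed-yield targets.
 */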
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        if (vcpu->preempted)
                vcpu->preempted = false;

        kvm_arch_sched_in(vcpu, cpu);
        kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        if (current->state == TASK_RUNNING)
                vcpu->preempted = true;
        kvm_arch_vcpu_put(vcpu);
}

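/*
 * kvm_init - entry point called by an architecture module (e.g. kvm_intel).
 * Sets up, in order: arch-specific state, irqfd, the hardware-enable cpumask,
 * arch hardware plus a per-CPU compatibility check, CPU hotplug and reboot
 * hooks, the vcpu slab cache, async page-fault support, the /dev/kvm misc
 * device, suspend/resume ops, preempt notifiers, debugfs and the VFIO device
 * ops.  Errors unwind in reverse order through the out_* labels.
 */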
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
             struct module *module)
{
        int r;
        int cpu;

        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;

        /*
         * kvm_arch_init makes sure there's at most one caller
         * for architectures that support multiple implementations,
         * like Intel and AMD on x86.
         * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
         * conflicts in case kvm is already set up for another implementation.
         */
        r = kvm_irqfd_init();
        if (r)
                goto out_irqfd;

        if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
                r = -ENOMEM;
                goto out_free_0;
        }

        r = kvm_arch_hardware_setup();
        if (r < 0)
                goto out_free_0a;

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                kvm_arch_check_processor_compat,
                                &r, 1);
                if (r < 0)
                        goto out_free_1;
        }

        r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
                                      kvm_starting_cpu, kvm_dying_cpu);
        if (r)
                goto out_free_2;
        register_reboot_notifier(&kvm_reboot_notifier);

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        if (!vcpu_align)
                vcpu_align = __alignof__(struct kvm_vcpu);
        kvm_vcpu_cache =
                kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
                                           SLAB_ACCOUNT,
                                           offsetof(struct kvm_vcpu, arch),
                                           sizeof_field(struct kvm_vcpu, arch),
                                           NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto out_free_3;
        }

        r = kvm_async_pf_init();
        if (r)
                goto out_free;

        kvm_chardev_ops.owner = module;
        kvm_vm_fops.owner = module;
        kvm_vcpu_fops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                pr_err("kvm: misc device register failed\n");
                goto out_unreg;
        }

        register_syscore_ops(&kvm_syscore_ops);

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        r = kvm_init_debug();
        if (r) {
                pr_err("kvm: create debugfs files failed\n");
                goto out_undebugfs;
        }

        r = kvm_vfio_ops_init();
        WARN_ON(r);

        return 0;

out_undebugfs:
        unregister_syscore_ops(&kvm_syscore_ops);
        misc_deregister(&kvm_dev);
out_unreg:
        kvm_async_pf_deinit();
out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
out_free_2:
out_free_1:
        kvm_arch_hardware_unsetup();
out_free_0a:
        free_cpumask_var(cpus_hardware_enabled);
out_free_0:
        kvm_irqfd_exit();
out_irqfd:
        kvm_arch_exit();
out_fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
        debugfs_remove_recursive(kvm_debugfs_dir);
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        kvm_async_pf_deinit();
        unregister_syscore_ops(&kvm_syscore_ops);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
        on_each_cpu(hardware_disable_nolock, NULL, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        kvm_irqfd_exit();
        free_cpumask_var(cpus_hardware_enabled);
        kvm_vfio_ops_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);