kvm_main.c

  1. /*
  2. * Kernel-based Virtual Machine driver for Linux
  3. *
  4. * This module enables machines with Intel VT-x extensions to run virtual
  5. * machines without emulation or binary translation.
  6. *
  7. * Copyright (C) 2006 Qumranet, Inc.
  8. * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  9. *
  10. * Authors:
  11. * Avi Kivity <avi@qumranet.com>
  12. * Yaniv Kamay <yaniv@qumranet.com>
  13. *
  14. * This work is licensed under the terms of the GNU GPL, version 2. See
  15. * the COPYING file in the top-level directory.
  16. *
  17. */
  18. #include <kvm/iodev.h>
  19. #include <linux/kvm_host.h>
  20. #include <linux/kvm.h>
  21. #include <linux/module.h>
  22. #include <linux/errno.h>
  23. #include <linux/percpu.h>
  24. #include <linux/mm.h>
  25. #include <linux/miscdevice.h>
  26. #include <linux/vmalloc.h>
  27. #include <linux/reboot.h>
  28. #include <linux/debugfs.h>
  29. #include <linux/highmem.h>
  30. #include <linux/file.h>
  31. #include <linux/syscore_ops.h>
  32. #include <linux/cpu.h>
  33. #include <linux/sched/signal.h>
  34. #include <linux/sched/mm.h>
  35. #include <linux/sched/stat.h>
  36. #include <linux/cpumask.h>
  37. #include <linux/smp.h>
  38. #include <linux/anon_inodes.h>
  39. #include <linux/profile.h>
  40. #include <linux/kvm_para.h>
  41. #include <linux/pagemap.h>
  42. #include <linux/mman.h>
  43. #include <linux/swap.h>
  44. #include <linux/bitops.h>
  45. #include <linux/spinlock.h>
  46. #include <linux/compat.h>
  47. #include <linux/srcu.h>
  48. #include <linux/hugetlb.h>
  49. #include <linux/slab.h>
  50. #include <linux/sort.h>
  51. #include <linux/bsearch.h>
  52. #include <asm/processor.h>
  53. #include <asm/io.h>
  54. #include <asm/ioctl.h>
  55. #include <linux/uaccess.h>
  56. #include <asm/pgtable.h>
  57. #include "coalesced_mmio.h"
  58. #include "async_pf.h"
  59. #include "vfio.h"
  60. #define CREATE_TRACE_POINTS
  61. #include <trace/events/kvm.h>
  62. /* Worst case buffer size needed for holding an integer. */
  63. #define ITOA_MAX_LEN 12
  64. MODULE_AUTHOR("Qumranet");
  65. MODULE_LICENSE("GPL");
  66. /* Architectures should define their poll value according to the halt latency */
  67. unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
  68. module_param(halt_poll_ns, uint, 0644);
  69. EXPORT_SYMBOL_GPL(halt_poll_ns);
  70. /* Default doubles per-vcpu halt_poll_ns. */
  71. unsigned int halt_poll_ns_grow = 2;
  72. module_param(halt_poll_ns_grow, uint, 0644);
  73. EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
  74. /* Default resets per-vcpu halt_poll_ns. */
  75. unsigned int halt_poll_ns_shrink;
  76. module_param(halt_poll_ns_shrink, uint, 0644);
  77. EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
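/*
 * Illustrative note (an assumption, not taken from this file): because the
 * three parameters above are registered with mode 0644, they can be tuned
 * at runtime through sysfs once kvm.ko is loaded, e.g.:
 *
 *     echo 200000 > /sys/module/kvm/parameters/halt_poll_ns
 */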
  78. /*
  79. * Ordering of locks:
  80. *
  81. * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  82. */
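/*
 * A minimal sketch of the ordering documented above (illustration only; the
 * caller is hypothetical): any path that needs more than one of these locks
 * must acquire them outermost-first and release them in reverse order.
 *
 *     mutex_lock(&kvm->lock);
 *     mutex_lock(&kvm->slots_lock);
 *     mutex_lock(&kvm->irq_lock);
 *     ... critical section ...
 *     mutex_unlock(&kvm->irq_lock);
 *     mutex_unlock(&kvm->slots_lock);
 *     mutex_unlock(&kvm->lock);
 */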
  83. DEFINE_SPINLOCK(kvm_lock);
  84. static DEFINE_RAW_SPINLOCK(kvm_count_lock);
  85. LIST_HEAD(vm_list);
  86. static cpumask_var_t cpus_hardware_enabled;
  87. static int kvm_usage_count;
  88. static atomic_t hardware_enable_failed;
  89. struct kmem_cache *kvm_vcpu_cache;
  90. EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
  91. static __read_mostly struct preempt_ops kvm_preempt_ops;
  92. struct dentry *kvm_debugfs_dir;
  93. EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
  94. static int kvm_debugfs_num_entries;
  95. static const struct file_operations *stat_fops_per_vm[];
  96. static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
  97. unsigned long arg);
  98. #ifdef CONFIG_KVM_COMPAT
  99. static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
  100. unsigned long arg);
  101. #endif
  102. static int hardware_enable_all(void);
  103. static void hardware_disable_all(void);
  104. static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
  105. static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
  106. __visible bool kvm_rebooting;
  107. EXPORT_SYMBOL_GPL(kvm_rebooting);
  108. static bool largepages_enabled = true;
  109. #define KVM_EVENT_CREATE_VM 0
  110. #define KVM_EVENT_DESTROY_VM 1
  111. static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
  112. static unsigned long long kvm_createvm_count;
  113. static unsigned long long kvm_active_vms;
  114. bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
  115. {
  116. if (pfn_valid(pfn))
  117. return PageReserved(pfn_to_page(pfn));
  118. return true;
  119. }
  120. /*
  121. * Switches to specified vcpu, until a matching vcpu_put()
  122. */
  123. int vcpu_load(struct kvm_vcpu *vcpu)
  124. {
  125. int cpu;
  126. if (mutex_lock_killable(&vcpu->mutex))
  127. return -EINTR;
  128. cpu = get_cpu();
  129. preempt_notifier_register(&vcpu->preempt_notifier);
  130. kvm_arch_vcpu_load(vcpu, cpu);
  131. put_cpu();
  132. return 0;
  133. }
  134. EXPORT_SYMBOL_GPL(vcpu_load);
  135. void vcpu_put(struct kvm_vcpu *vcpu)
  136. {
  137. preempt_disable();
  138. kvm_arch_vcpu_put(vcpu);
  139. preempt_notifier_unregister(&vcpu->preempt_notifier);
  140. preempt_enable();
  141. mutex_unlock(&vcpu->mutex);
  142. }
  143. EXPORT_SYMBOL_GPL(vcpu_put);
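/*
 * A minimal sketch of how the two helpers above pair up (illustration only;
 * the surrounding code is hypothetical): callers such as the vcpu ioctl
 * handlers load the vcpu, bail out on -EINTR, and always put it afterwards.
 *
 *     r = vcpu_load(vcpu);
 *     if (r)
 *             return r;       /* mutex_lock_killable() was interrupted */
 *     ... operate on the loaded vcpu ...
 *     vcpu_put(vcpu);
 */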
  144. /* TODO: merge with kvm_arch_vcpu_should_kick */
  145. static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
  146. {
  147. int mode = kvm_vcpu_exiting_guest_mode(vcpu);
  148. /*
  149. * We need to wait for the VCPU to reenable interrupts and get out of
  150. * READING_SHADOW_PAGE_TABLES mode.
  151. */
  152. if (req & KVM_REQUEST_WAIT)
  153. return mode != OUTSIDE_GUEST_MODE;
  154. /*
  155. * Need to kick a running VCPU, but otherwise there is nothing to do.
  156. */
  157. return mode == IN_GUEST_MODE;
  158. }
  159. static void ack_flush(void *_completed)
  160. {
  161. }
  162. static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
  163. {
  164. if (unlikely(!cpus))
  165. cpus = cpu_online_mask;
  166. if (cpumask_empty(cpus))
  167. return false;
  168. smp_call_function_many(cpus, ack_flush, NULL, wait);
  169. return true;
  170. }
  171. bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
  172. {
  173. int i, cpu, me;
  174. cpumask_var_t cpus;
  175. bool called;
  176. struct kvm_vcpu *vcpu;
  177. zalloc_cpumask_var(&cpus, GFP_ATOMIC);
  178. me = get_cpu();
  179. kvm_for_each_vcpu(i, vcpu, kvm) {
  180. kvm_make_request(req, vcpu);
  181. cpu = vcpu->cpu;
  182. if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
  183. continue;
  184. if (cpus != NULL && cpu != -1 && cpu != me &&
  185. kvm_request_needs_ipi(vcpu, req))
  186. __cpumask_set_cpu(cpu, cpus);
  187. }
  188. called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
  189. put_cpu();
  190. free_cpumask_var(cpus);
  191. return called;
  192. }
  193. #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
  194. void kvm_flush_remote_tlbs(struct kvm *kvm)
  195. {
  196. /*
  197. * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
  198. * kvm_make_all_cpus_request.
  199. */
  200. long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
  201. /*
  202. * We want to publish modifications to the page tables before reading
  203. * mode. Pairs with a memory barrier in arch-specific code.
  204. * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
  205. * and smp_mb in walk_shadow_page_lockless_begin/end.
  206. * - powerpc: smp_mb in kvmppc_prepare_to_enter.
  207. *
  208. * There is already an smp_mb__after_atomic() before
  209. * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
  210. * barrier here.
  211. */
  212. if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
  213. ++kvm->stat.remote_tlb_flush;
  214. cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
  215. }
  216. EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
  217. #endif
  218. void kvm_reload_remote_mmus(struct kvm *kvm)
  219. {
  220. kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
  221. }
  222. int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
  223. {
  224. struct page *page;
  225. int r;
  226. mutex_init(&vcpu->mutex);
  227. vcpu->cpu = -1;
  228. vcpu->kvm = kvm;
  229. vcpu->vcpu_id = id;
  230. vcpu->pid = NULL;
  231. init_swait_queue_head(&vcpu->wq);
  232. kvm_async_pf_vcpu_init(vcpu);
  233. vcpu->pre_pcpu = -1;
  234. INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);
  235. page = alloc_page(GFP_KERNEL | __GFP_ZERO);
  236. if (!page) {
  237. r = -ENOMEM;
  238. goto fail;
  239. }
  240. vcpu->run = page_address(page);
  241. kvm_vcpu_set_in_spin_loop(vcpu, false);
  242. kvm_vcpu_set_dy_eligible(vcpu, false);
  243. vcpu->preempted = false;
  244. r = kvm_arch_vcpu_init(vcpu);
  245. if (r < 0)
  246. goto fail_free_run;
  247. return 0;
  248. fail_free_run:
  249. free_page((unsigned long)vcpu->run);
  250. fail:
  251. return r;
  252. }
  253. EXPORT_SYMBOL_GPL(kvm_vcpu_init);
  254. void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
  255. {
  256. /*
  257. * no need for rcu_read_lock as VCPU_RUN is the only place that
  258. * will change the vcpu->pid pointer and on uninit all file
  259. * descriptors are already gone.
  260. */
  261. put_pid(rcu_dereference_protected(vcpu->pid, 1));
  262. kvm_arch_vcpu_uninit(vcpu);
  263. free_page((unsigned long)vcpu->run);
  264. }
  265. EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
  266. #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
  267. static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
  268. {
  269. return container_of(mn, struct kvm, mmu_notifier);
  270. }
  271. static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
  272. struct mm_struct *mm,
  273. unsigned long address,
  274. pte_t pte)
  275. {
  276. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  277. int idx;
  278. idx = srcu_read_lock(&kvm->srcu);
  279. spin_lock(&kvm->mmu_lock);
  280. kvm->mmu_notifier_seq++;
  281. kvm_set_spte_hva(kvm, address, pte);
  282. spin_unlock(&kvm->mmu_lock);
  283. srcu_read_unlock(&kvm->srcu, idx);
  284. }
  285. static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
  286. struct mm_struct *mm,
  287. unsigned long start,
  288. unsigned long end)
  289. {
  290. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  291. int need_tlb_flush = 0, idx;
  292. idx = srcu_read_lock(&kvm->srcu);
  293. spin_lock(&kvm->mmu_lock);
  294. /*
  295. * The count increase must become visible at unlock time as no
  296. * spte can be established without taking the mmu_lock and
  297. * count is also read inside the mmu_lock critical section.
  298. */
  299. kvm->mmu_notifier_count++;
  300. need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
  301. need_tlb_flush |= kvm->tlbs_dirty;
  302. /* we have to flush the TLB before the pages can be freed */
  303. if (need_tlb_flush)
  304. kvm_flush_remote_tlbs(kvm);
  305. spin_unlock(&kvm->mmu_lock);
  306. srcu_read_unlock(&kvm->srcu, idx);
  307. }
  308. static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
  309. struct mm_struct *mm,
  310. unsigned long start,
  311. unsigned long end)
  312. {
  313. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  314. spin_lock(&kvm->mmu_lock);
  315. /*
  316. * This sequence increase will notify the kvm page fault that
  317. * the page that is going to be mapped in the spte could have
  318. * been freed.
  319. */
  320. kvm->mmu_notifier_seq++;
  321. smp_wmb();
  322. /*
  323. * The above sequence increase must be visible before the
  324. * below count decrease, which is ensured by the smp_wmb above
  325. * in conjunction with the smp_rmb in mmu_notifier_retry().
  326. */
  327. kvm->mmu_notifier_count--;
  328. spin_unlock(&kvm->mmu_lock);
  329. BUG_ON(kvm->mmu_notifier_count < 0);
  330. }
  331. static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
  332. struct mm_struct *mm,
  333. unsigned long start,
  334. unsigned long end)
  335. {
  336. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  337. int young, idx;
  338. idx = srcu_read_lock(&kvm->srcu);
  339. spin_lock(&kvm->mmu_lock);
  340. young = kvm_age_hva(kvm, start, end);
  341. if (young)
  342. kvm_flush_remote_tlbs(kvm);
  343. spin_unlock(&kvm->mmu_lock);
  344. srcu_read_unlock(&kvm->srcu, idx);
  345. return young;
  346. }
  347. static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
  348. struct mm_struct *mm,
  349. unsigned long start,
  350. unsigned long end)
  351. {
  352. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  353. int young, idx;
  354. idx = srcu_read_lock(&kvm->srcu);
  355. spin_lock(&kvm->mmu_lock);
  356. /*
  357. * Even though we do not flush TLB, this will still adversely
  358. * affect performance on pre-Haswell Intel EPT, where there is
  359. * no EPT Access Bit to clear so that we have to tear down EPT
  360. * tables instead. If we find this unacceptable, we can always
  361. * add a parameter to kvm_age_hva so that it effectively doesn't
  362. * do anything on clear_young.
  363. *
  364. * Also note that currently we never issue secondary TLB flushes
  365. * from clear_young, leaving this job up to the regular system
  366. * cadence. If we find this inaccurate, we might come up with a
  367. * more sophisticated heuristic later.
  368. */
  369. young = kvm_age_hva(kvm, start, end);
  370. spin_unlock(&kvm->mmu_lock);
  371. srcu_read_unlock(&kvm->srcu, idx);
  372. return young;
  373. }
  374. static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
  375. struct mm_struct *mm,
  376. unsigned long address)
  377. {
  378. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  379. int young, idx;
  380. idx = srcu_read_lock(&kvm->srcu);
  381. spin_lock(&kvm->mmu_lock);
  382. young = kvm_test_age_hva(kvm, address);
  383. spin_unlock(&kvm->mmu_lock);
  384. srcu_read_unlock(&kvm->srcu, idx);
  385. return young;
  386. }
  387. static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
  388. struct mm_struct *mm)
  389. {
  390. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  391. int idx;
  392. idx = srcu_read_lock(&kvm->srcu);
  393. kvm_arch_flush_shadow_all(kvm);
  394. srcu_read_unlock(&kvm->srcu, idx);
  395. }
  396. static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
  397. .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
  398. .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
  399. .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
  400. .clear_young = kvm_mmu_notifier_clear_young,
  401. .test_young = kvm_mmu_notifier_test_young,
  402. .change_pte = kvm_mmu_notifier_change_pte,
  403. .release = kvm_mmu_notifier_release,
  404. };
  405. static int kvm_init_mmu_notifier(struct kvm *kvm)
  406. {
  407. kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
  408. return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
  409. }
  410. #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
  411. static int kvm_init_mmu_notifier(struct kvm *kvm)
  412. {
  413. return 0;
  414. }
  415. #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
  416. static struct kvm_memslots *kvm_alloc_memslots(void)
  417. {
  418. int i;
  419. struct kvm_memslots *slots;
  420. slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
  421. if (!slots)
  422. return NULL;
  423. for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
  424. slots->id_to_index[i] = slots->memslots[i].id = i;
  425. return slots;
  426. }
  427. static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
  428. {
  429. if (!memslot->dirty_bitmap)
  430. return;
  431. kvfree(memslot->dirty_bitmap);
  432. memslot->dirty_bitmap = NULL;
  433. }
  434. /*
  435. * Free any memory in @free but not in @dont.
  436. */
  437. static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
  438. struct kvm_memory_slot *dont)
  439. {
  440. if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
  441. kvm_destroy_dirty_bitmap(free);
  442. kvm_arch_free_memslot(kvm, free, dont);
  443. free->npages = 0;
  444. }
  445. static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
  446. {
  447. struct kvm_memory_slot *memslot;
  448. if (!slots)
  449. return;
  450. kvm_for_each_memslot(memslot, slots)
  451. kvm_free_memslot(kvm, memslot, NULL);
  452. kvfree(slots);
  453. }
  454. static void kvm_destroy_vm_debugfs(struct kvm *kvm)
  455. {
  456. int i;
  457. if (!kvm->debugfs_dentry)
  458. return;
  459. debugfs_remove_recursive(kvm->debugfs_dentry);
  460. if (kvm->debugfs_stat_data) {
  461. for (i = 0; i < kvm_debugfs_num_entries; i++)
  462. kfree(kvm->debugfs_stat_data[i]);
  463. kfree(kvm->debugfs_stat_data);
  464. }
  465. }
  466. static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
  467. {
  468. char dir_name[ITOA_MAX_LEN * 2];
  469. struct kvm_stat_data *stat_data;
  470. struct kvm_stats_debugfs_item *p;
  471. if (!debugfs_initialized())
  472. return 0;
  473. snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
  474. kvm->debugfs_dentry = debugfs_create_dir(dir_name,
  475. kvm_debugfs_dir);
  476. if (!kvm->debugfs_dentry)
  477. return -ENOMEM;
  478. kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
  479. sizeof(*kvm->debugfs_stat_data),
  480. GFP_KERNEL);
  481. if (!kvm->debugfs_stat_data)
  482. return -ENOMEM;
  483. for (p = debugfs_entries; p->name; p++) {
  484. stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL);
  485. if (!stat_data)
  486. return -ENOMEM;
  487. stat_data->kvm = kvm;
  488. stat_data->offset = p->offset;
  489. kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
  490. if (!debugfs_create_file(p->name, 0644,
  491. kvm->debugfs_dentry,
  492. stat_data,
  493. stat_fops_per_vm[p->kind]))
  494. return -ENOMEM;
  495. }
  496. return 0;
  497. }
  498. static struct kvm *kvm_create_vm(unsigned long type)
  499. {
  500. int r, i;
  501. struct kvm *kvm = kvm_arch_alloc_vm();
  502. if (!kvm)
  503. return ERR_PTR(-ENOMEM);
  504. spin_lock_init(&kvm->mmu_lock);
  505. mmgrab(current->mm);
  506. kvm->mm = current->mm;
  507. kvm_eventfd_init(kvm);
  508. mutex_init(&kvm->lock);
  509. mutex_init(&kvm->irq_lock);
  510. mutex_init(&kvm->slots_lock);
  511. refcount_set(&kvm->users_count, 1);
  512. INIT_LIST_HEAD(&kvm->devices);
  513. r = kvm_arch_init_vm(kvm, type);
  514. if (r)
  515. goto out_err_no_disable;
  516. r = hardware_enable_all();
  517. if (r)
  518. goto out_err_no_disable;
  519. #ifdef CONFIG_HAVE_KVM_IRQFD
  520. INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
  521. #endif
  522. BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
  523. r = -ENOMEM;
  524. for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
  525. struct kvm_memslots *slots = kvm_alloc_memslots();
  526. if (!slots)
  527. goto out_err_no_srcu;
  528. /*
  529. * Generations must be different for each address space.
  530. * Init kvm generation close to the maximum to easily test the
  531. * code that handles generation number wrap-around.
  532. */
  533. slots->generation = i * 2 - 150;
  534. rcu_assign_pointer(kvm->memslots[i], slots);
  535. }
  536. if (init_srcu_struct(&kvm->srcu))
  537. goto out_err_no_srcu;
  538. if (init_srcu_struct(&kvm->irq_srcu))
  539. goto out_err_no_irq_srcu;
  540. for (i = 0; i < KVM_NR_BUSES; i++) {
  541. rcu_assign_pointer(kvm->buses[i],
  542. kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL));
  543. if (!kvm->buses[i])
  544. goto out_err;
  545. }
  546. r = kvm_init_mmu_notifier(kvm);
  547. if (r)
  548. goto out_err;
  549. spin_lock(&kvm_lock);
  550. list_add(&kvm->vm_list, &vm_list);
  551. spin_unlock(&kvm_lock);
  552. preempt_notifier_inc();
  553. return kvm;
  554. out_err:
  555. cleanup_srcu_struct(&kvm->irq_srcu);
  556. out_err_no_irq_srcu:
  557. cleanup_srcu_struct(&kvm->srcu);
  558. out_err_no_srcu:
  559. hardware_disable_all();
  560. out_err_no_disable:
  561. refcount_set(&kvm->users_count, 0);
  562. for (i = 0; i < KVM_NR_BUSES; i++)
  563. kfree(kvm_get_bus(kvm, i));
  564. for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
  565. kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
  566. kvm_arch_free_vm(kvm);
  567. mmdrop(current->mm);
  568. return ERR_PTR(r);
  569. }
  570. static void kvm_destroy_devices(struct kvm *kvm)
  571. {
  572. struct kvm_device *dev, *tmp;
  573. /*
  574. * We do not need to take the kvm->lock here, because nobody else
  575. * has a reference to the struct kvm at this point and therefore
  576. * cannot access the devices list anyhow.
  577. */
  578. list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
  579. list_del(&dev->vm_node);
  580. dev->ops->destroy(dev);
  581. }
  582. }
  583. static void kvm_destroy_vm(struct kvm *kvm)
  584. {
  585. int i;
  586. struct mm_struct *mm = kvm->mm;
  587. kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
  588. kvm_destroy_vm_debugfs(kvm);
  589. kvm_arch_sync_events(kvm);
  590. spin_lock(&kvm_lock);
  591. list_del(&kvm->vm_list);
  592. spin_unlock(&kvm_lock);
  593. kvm_free_irq_routing(kvm);
  594. for (i = 0; i < KVM_NR_BUSES; i++) {
  595. struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
  596. if (bus)
  597. kvm_io_bus_destroy(bus);
  598. kvm->buses[i] = NULL;
  599. }
  600. kvm_coalesced_mmio_free(kvm);
  601. #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
  602. mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
  603. #else
  604. kvm_arch_flush_shadow_all(kvm);
  605. #endif
  606. kvm_arch_destroy_vm(kvm);
  607. kvm_destroy_devices(kvm);
  608. for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
  609. kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
  610. cleanup_srcu_struct(&kvm->irq_srcu);
  611. cleanup_srcu_struct(&kvm->srcu);
  612. kvm_arch_free_vm(kvm);
  613. preempt_notifier_dec();
  614. hardware_disable_all();
  615. mmdrop(mm);
  616. }
  617. void kvm_get_kvm(struct kvm *kvm)
  618. {
  619. refcount_inc(&kvm->users_count);
  620. }
  621. EXPORT_SYMBOL_GPL(kvm_get_kvm);
  622. void kvm_put_kvm(struct kvm *kvm)
  623. {
  624. if (refcount_dec_and_test(&kvm->users_count))
  625. kvm_destroy_vm(kvm);
  626. }
  627. EXPORT_SYMBOL_GPL(kvm_put_kvm);
  628. static int kvm_vm_release(struct inode *inode, struct file *filp)
  629. {
  630. struct kvm *kvm = filp->private_data;
  631. kvm_irqfd_release(kvm);
  632. kvm_put_kvm(kvm);
  633. return 0;
  634. }
  635. /*
  636. * Allocation size is twice as large as the actual dirty bitmap size.
  637. * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
  638. */
  639. static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
  640. {
  641. unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
  642. memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL);
  643. if (!memslot->dirty_bitmap)
  644. return -ENOMEM;
  645. return 0;
  646. }
  647. /*
  648. * Insert memslot and re-sort memslots based on their GFN,
  649. * so binary search can be used to look up a GFN.
  650. * The sorting algorithm takes advantage of having an initially
  651. * sorted array and a known changed memslot position.
  652. */
  653. static void update_memslots(struct kvm_memslots *slots,
  654. struct kvm_memory_slot *new)
  655. {
  656. int id = new->id;
  657. int i = slots->id_to_index[id];
  658. struct kvm_memory_slot *mslots = slots->memslots;
  659. WARN_ON(mslots[i].id != id);
  660. if (!new->npages) {
  661. WARN_ON(!mslots[i].npages);
  662. if (mslots[i].npages)
  663. slots->used_slots--;
  664. } else {
  665. if (!mslots[i].npages)
  666. slots->used_slots++;
  667. }
  668. while (i < KVM_MEM_SLOTS_NUM - 1 &&
  669. new->base_gfn <= mslots[i + 1].base_gfn) {
  670. if (!mslots[i + 1].npages)
  671. break;
  672. mslots[i] = mslots[i + 1];
  673. slots->id_to_index[mslots[i].id] = i;
  674. i++;
  675. }
  676. /*
  677. * The ">=" is needed when creating a slot with base_gfn == 0,
  678. * so that it moves before all those with base_gfn == npages == 0.
  679. *
  680. * On the other hand, if new->npages is zero, the above loop has
  681. * already left i pointing to the beginning of the empty part of
  682. * mslots, and the ">=" would move the hole backwards in this
  683. * case---which is wrong. So skip the loop when deleting a slot.
  684. */
  685. if (new->npages) {
  686. while (i > 0 &&
  687. new->base_gfn >= mslots[i - 1].base_gfn) {
  688. mslots[i] = mslots[i - 1];
  689. slots->id_to_index[mslots[i].id] = i;
  690. i--;
  691. }
  692. } else
  693. WARN_ON_ONCE(i != slots->used_slots);
  694. mslots[i] = *new;
  695. slots->id_to_index[mslots[i].id] = i;
  696. }
  697. static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
  698. {
  699. u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
  700. #ifdef __KVM_HAVE_READONLY_MEM
  701. valid_flags |= KVM_MEM_READONLY;
  702. #endif
  703. if (mem->flags & ~valid_flags)
  704. return -EINVAL;
  705. return 0;
  706. }
  707. static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
  708. int as_id, struct kvm_memslots *slots)
  709. {
  710. struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
  711. /*
  712. * Set the low bit in the generation, which disables SPTE caching
  713. * until the end of synchronize_srcu_expedited.
  714. */
  715. WARN_ON(old_memslots->generation & 1);
  716. slots->generation = old_memslots->generation + 1;
  717. rcu_assign_pointer(kvm->memslots[as_id], slots);
  718. synchronize_srcu_expedited(&kvm->srcu);
  719. /*
  720. * Increment the new memslot generation a second time. This prevents
  721. * vm exits that race with memslot updates from caching a memslot
  722. * generation that will (potentially) be valid forever.
  723. *
  724. * Generations must be unique even across address spaces. We do not need
  725. * a global counter for that, instead the generation space is evenly split
  726. * across address spaces. For example, with two address spaces, address
  727. * space 0 will use generations 0, 4, 8, ... while address space 1 will
  728. * use generations 2, 6, 10, 14, ...
  729. */
  730. slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
  731. kvm_arch_memslots_updated(kvm, slots);
  732. return old_memslots;
  733. }
  734. /*
  735. * Allocate some memory and give it an address in the guest physical address
  736. * space.
  737. *
  738. * Discontiguous memory is allowed, mostly for framebuffers.
  739. *
  740. * Must be called holding kvm->slots_lock for write.
  741. */
  742. int __kvm_set_memory_region(struct kvm *kvm,
  743. const struct kvm_userspace_memory_region *mem)
  744. {
  745. int r;
  746. gfn_t base_gfn;
  747. unsigned long npages;
  748. struct kvm_memory_slot *slot;
  749. struct kvm_memory_slot old, new;
  750. struct kvm_memslots *slots = NULL, *old_memslots;
  751. int as_id, id;
  752. enum kvm_mr_change change;
  753. r = check_memory_region_flags(mem);
  754. if (r)
  755. goto out;
  756. r = -EINVAL;
  757. as_id = mem->slot >> 16;
  758. id = (u16)mem->slot;
  759. /* General sanity checks */
  760. if (mem->memory_size & (PAGE_SIZE - 1))
  761. goto out;
  762. if (mem->guest_phys_addr & (PAGE_SIZE - 1))
  763. goto out;
  764. /* We can read the guest memory with __xxx_user() later on. */
  765. if ((id < KVM_USER_MEM_SLOTS) &&
  766. ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
  767. !access_ok(VERIFY_WRITE,
  768. (void __user *)(unsigned long)mem->userspace_addr,
  769. mem->memory_size)))
  770. goto out;
  771. if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
  772. goto out;
  773. if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
  774. goto out;
  775. slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
  776. base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
  777. npages = mem->memory_size >> PAGE_SHIFT;
  778. if (npages > KVM_MEM_MAX_NR_PAGES)
  779. goto out;
  780. new = old = *slot;
  781. new.id = id;
  782. new.base_gfn = base_gfn;
  783. new.npages = npages;
  784. new.flags = mem->flags;
  785. if (npages) {
  786. if (!old.npages)
  787. change = KVM_MR_CREATE;
  788. else { /* Modify an existing slot. */
  789. if ((mem->userspace_addr != old.userspace_addr) ||
  790. (npages != old.npages) ||
  791. ((new.flags ^ old.flags) & KVM_MEM_READONLY))
  792. goto out;
  793. if (base_gfn != old.base_gfn)
  794. change = KVM_MR_MOVE;
  795. else if (new.flags != old.flags)
  796. change = KVM_MR_FLAGS_ONLY;
  797. else { /* Nothing to change. */
  798. r = 0;
  799. goto out;
  800. }
  801. }
  802. } else {
  803. if (!old.npages)
  804. goto out;
  805. change = KVM_MR_DELETE;
  806. new.base_gfn = 0;
  807. new.flags = 0;
  808. }
  809. if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
  810. /* Check for overlaps */
  811. r = -EEXIST;
  812. kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
  813. if ((slot->id >= KVM_USER_MEM_SLOTS) ||
  814. (slot->id == id))
  815. continue;
  816. if (!((base_gfn + npages <= slot->base_gfn) ||
  817. (base_gfn >= slot->base_gfn + slot->npages)))
  818. goto out;
  819. }
  820. }
  821. /* Free page dirty bitmap if unneeded */
  822. if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
  823. new.dirty_bitmap = NULL;
  824. r = -ENOMEM;
  825. if (change == KVM_MR_CREATE) {
  826. new.userspace_addr = mem->userspace_addr;
  827. if (kvm_arch_create_memslot(kvm, &new, npages))
  828. goto out_free;
  829. }
  830. /* Allocate page dirty bitmap if needed */
  831. if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
  832. if (kvm_create_dirty_bitmap(&new) < 0)
  833. goto out_free;
  834. }
  835. slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
  836. if (!slots)
  837. goto out_free;
  838. memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
  839. if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
  840. slot = id_to_memslot(slots, id);
  841. slot->flags |= KVM_MEMSLOT_INVALID;
  842. old_memslots = install_new_memslots(kvm, as_id, slots);
  843. /* From this point no new shadow pages pointing to a deleted,
  844. * or moved, memslot will be created.
  845. *
  846. * validation of sp->gfn happens in:
  847. * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
  848. * - kvm_is_visible_gfn (mmu_check_roots)
  849. */
  850. kvm_arch_flush_shadow_memslot(kvm, slot);
  851. /*
  852. * We can re-use the old_memslots from above, the only difference
  853. * from the currently installed memslots is the invalid flag. This
  854. * will get overwritten by update_memslots anyway.
  855. */
  856. slots = old_memslots;
  857. }
  858. r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
  859. if (r)
  860. goto out_slots;
  861. /* actual memory is freed via old in kvm_free_memslot below */
  862. if (change == KVM_MR_DELETE) {
  863. new.dirty_bitmap = NULL;
  864. memset(&new.arch, 0, sizeof(new.arch));
  865. }
  866. update_memslots(slots, &new);
  867. old_memslots = install_new_memslots(kvm, as_id, slots);
  868. kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);
  869. kvm_free_memslot(kvm, &old, &new);
  870. kvfree(old_memslots);
  871. return 0;
  872. out_slots:
  873. kvfree(slots);
  874. out_free:
  875. kvm_free_memslot(kvm, &new, &old);
  876. out:
  877. return r;
  878. }
  879. EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
  880. int kvm_set_memory_region(struct kvm *kvm,
  881. const struct kvm_userspace_memory_region *mem)
  882. {
  883. int r;
  884. mutex_lock(&kvm->slots_lock);
  885. r = __kvm_set_memory_region(kvm, mem);
  886. mutex_unlock(&kvm->slots_lock);
  887. return r;
  888. }
  889. EXPORT_SYMBOL_GPL(kvm_set_memory_region);
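/*
 * Illustrative userspace sketch (an assumption, not code from this file):
 * the checks in __kvm_set_memory_region() mirror what a VMM passes through
 * the KVM_SET_USER_MEMORY_REGION ioctl on a VM fd; the address space id, if
 * any, is packed into the high 16 bits of ->slot, the slot id into the low 16.
 *
 *     struct kvm_userspace_memory_region region = {
 *             .slot            = 0,                    /* as_id 0, slot 0 */
 *             .flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *             .guest_phys_addr = 0x100000,             /* page aligned */
 *             .memory_size     = 16 * 4096,            /* page aligned */
 *             .userspace_addr  = (__u64)backing,       /* page-aligned mmap() */
 *     };
 *     if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
 *             err(1, "KVM_SET_USER_MEMORY_REGION");
 */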
  890. static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
  891. struct kvm_userspace_memory_region *mem)
  892. {
  893. if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
  894. return -EINVAL;
  895. return kvm_set_memory_region(kvm, mem);
  896. }
  897. int kvm_get_dirty_log(struct kvm *kvm,
  898. struct kvm_dirty_log *log, int *is_dirty)
  899. {
  900. struct kvm_memslots *slots;
  901. struct kvm_memory_slot *memslot;
  902. int i, as_id, id;
  903. unsigned long n;
  904. unsigned long any = 0;
  905. as_id = log->slot >> 16;
  906. id = (u16)log->slot;
  907. if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
  908. return -EINVAL;
  909. slots = __kvm_memslots(kvm, as_id);
  910. memslot = id_to_memslot(slots, id);
  911. if (!memslot->dirty_bitmap)
  912. return -ENOENT;
  913. n = kvm_dirty_bitmap_bytes(memslot);
  914. for (i = 0; !any && i < n/sizeof(long); ++i)
  915. any = memslot->dirty_bitmap[i];
  916. if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
  917. return -EFAULT;
  918. if (any)
  919. *is_dirty = 1;
  920. return 0;
  921. }
  922. EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
  923. #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
  924. /**
  925. * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
  926. * are dirty, write protect them for the next write.
  927. * @kvm: pointer to kvm instance
  928. * @log: slot id and address to which we copy the log
  929. * @is_dirty: flag set if any page is dirty
  930. *
  931. * We need to keep it in mind that VCPU threads can write to the bitmap
  932. * concurrently. So, to avoid losing track of dirty pages we keep the
  933. * following order:
  934. *
  935. * 1. Take a snapshot of the bit and clear it if needed.
  936. * 2. Write protect the corresponding page.
  937. * 3. Copy the snapshot to userspace.
  938. * 4. Upon return, the caller flushes TLBs if needed.
  939. *
  940. * Between 2 and 4, the guest may write to the page using the remaining TLB
  941. * entry. This is not a problem because the page is reported dirty using
  942. * the snapshot taken before and step 4 ensures that writes done after
  943. * exiting to userspace will be logged for the next call.
  944. *
  945. */
  946. int kvm_get_dirty_log_protect(struct kvm *kvm,
  947. struct kvm_dirty_log *log, bool *is_dirty)
  948. {
  949. struct kvm_memslots *slots;
  950. struct kvm_memory_slot *memslot;
  951. int i, as_id, id;
  952. unsigned long n;
  953. unsigned long *dirty_bitmap;
  954. unsigned long *dirty_bitmap_buffer;
  955. as_id = log->slot >> 16;
  956. id = (u16)log->slot;
  957. if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
  958. return -EINVAL;
  959. slots = __kvm_memslots(kvm, as_id);
  960. memslot = id_to_memslot(slots, id);
  961. dirty_bitmap = memslot->dirty_bitmap;
  962. if (!dirty_bitmap)
  963. return -ENOENT;
  964. n = kvm_dirty_bitmap_bytes(memslot);
  965. dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
  966. memset(dirty_bitmap_buffer, 0, n);
  967. spin_lock(&kvm->mmu_lock);
  968. *is_dirty = false;
  969. for (i = 0; i < n / sizeof(long); i++) {
  970. unsigned long mask;
  971. gfn_t offset;
  972. if (!dirty_bitmap[i])
  973. continue;
  974. *is_dirty = true;
  975. mask = xchg(&dirty_bitmap[i], 0);
  976. dirty_bitmap_buffer[i] = mask;
  977. if (mask) {
  978. offset = i * BITS_PER_LONG;
  979. kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
  980. offset, mask);
  981. }
  982. }
  983. spin_unlock(&kvm->mmu_lock);
  984. if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
  985. return -EFAULT;
  986. return 0;
  987. }
  988. EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
  989. #endif
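/*
 * Illustrative userspace sketch (an assumption, not code from this file):
 * the bitmaps maintained above are fetched with the KVM_GET_DIRTY_LOG ioctl.
 * The user buffer needs one bit per page of the memslot, rounded up to a
 * multiple of sizeof(long); set bits mark pages written since the previous
 * call.
 *
 *     size_t bytes = ((npages + 63) / 64) * 8;   /* one bit per page */
 *     unsigned long *bitmap = calloc(1, bytes);
 *     struct kvm_dirty_log log = {
 *             .slot         = 0,                 /* as_id << 16 | slot id */
 *             .dirty_bitmap = bitmap,
 *     };
 *     if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
 *             err(1, "KVM_GET_DIRTY_LOG");
 */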
  990. bool kvm_largepages_enabled(void)
  991. {
  992. return largepages_enabled;
  993. }
  994. void kvm_disable_largepages(void)
  995. {
  996. largepages_enabled = false;
  997. }
  998. EXPORT_SYMBOL_GPL(kvm_disable_largepages);
  999. struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
  1000. {
  1001. return __gfn_to_memslot(kvm_memslots(kvm), gfn);
  1002. }
  1003. EXPORT_SYMBOL_GPL(gfn_to_memslot);
  1004. struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
  1005. {
  1006. return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
  1007. }
  1008. bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
  1009. {
  1010. struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
  1011. if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
  1012. memslot->flags & KVM_MEMSLOT_INVALID)
  1013. return false;
  1014. return true;
  1015. }
  1016. EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
  1017. unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
  1018. {
  1019. struct vm_area_struct *vma;
  1020. unsigned long addr, size;
  1021. size = PAGE_SIZE;
  1022. addr = gfn_to_hva(kvm, gfn);
  1023. if (kvm_is_error_hva(addr))
  1024. return PAGE_SIZE;
  1025. down_read(&current->mm->mmap_sem);
  1026. vma = find_vma(current->mm, addr);
  1027. if (!vma)
  1028. goto out;
  1029. size = vma_kernel_pagesize(vma);
  1030. out:
  1031. up_read(&current->mm->mmap_sem);
  1032. return size;
  1033. }
  1034. static bool memslot_is_readonly(struct kvm_memory_slot *slot)
  1035. {
  1036. return slot->flags & KVM_MEM_READONLY;
  1037. }
  1038. static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
  1039. gfn_t *nr_pages, bool write)
  1040. {
  1041. if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
  1042. return KVM_HVA_ERR_BAD;
  1043. if (memslot_is_readonly(slot) && write)
  1044. return KVM_HVA_ERR_RO_BAD;
  1045. if (nr_pages)
  1046. *nr_pages = slot->npages - (gfn - slot->base_gfn);
  1047. return __gfn_to_hva_memslot(slot, gfn);
  1048. }
  1049. static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
  1050. gfn_t *nr_pages)
  1051. {
  1052. return __gfn_to_hva_many(slot, gfn, nr_pages, true);
  1053. }
  1054. unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
  1055. gfn_t gfn)
  1056. {
  1057. return gfn_to_hva_many(slot, gfn, NULL);
  1058. }
  1059. EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
  1060. unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
  1061. {
  1062. return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
  1063. }
  1064. EXPORT_SYMBOL_GPL(gfn_to_hva);
  1065. unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
  1066. {
  1067. return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
  1068. }
  1069. EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
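/*
 * For reference, the translation done by the gfn_to_hva helpers above boils
 * down to simple arithmetic on the memslot (a sketch of __gfn_to_hva_memslot()
 * as defined in kvm_host.h, stated here as an assumption for clarity):
 *
 *     hva = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
 */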
  1070. /*
  1071. * If writable is set to false, the hva returned by this function is only
  1072. * allowed to be read.
  1073. */
  1074. unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
  1075. gfn_t gfn, bool *writable)
  1076. {
  1077. unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
  1078. if (!kvm_is_error_hva(hva) && writable)
  1079. *writable = !memslot_is_readonly(slot);
  1080. return hva;
  1081. }
  1082. unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
  1083. {
  1084. struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
  1085. return gfn_to_hva_memslot_prot(slot, gfn, writable);
  1086. }
  1087. unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
  1088. {
  1089. struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
  1090. return gfn_to_hva_memslot_prot(slot, gfn, writable);
  1091. }
  1092. static int get_user_page_nowait(unsigned long start, int write,
  1093. struct page **page)
  1094. {
  1095. int flags = FOLL_NOWAIT | FOLL_HWPOISON;
  1096. if (write)
  1097. flags |= FOLL_WRITE;
  1098. return get_user_pages(start, 1, flags, page, NULL);
  1099. }
  1100. static inline int check_user_page_hwpoison(unsigned long addr)
  1101. {
  1102. int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
  1103. rc = get_user_pages(addr, 1, flags, NULL, NULL);
  1104. return rc == -EHWPOISON;
  1105. }
  1106. /*
  1107. * The atomic path to get the writable pfn, which will be stored in @pfn;
  1108. * true indicates success, otherwise false is returned.
  1109. */
  1110. static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
  1111. bool write_fault, bool *writable, kvm_pfn_t *pfn)
  1112. {
  1113. struct page *page[1];
  1114. int npages;
  1115. if (!(async || atomic))
  1116. return false;
  1117. /*
  1118. * Fast pin a writable pfn only if it is a write fault request
  1119. * or the caller allows to map a writable pfn for a read fault
  1120. * request.
  1121. */
  1122. if (!(write_fault || writable))
  1123. return false;
  1124. npages = __get_user_pages_fast(addr, 1, 1, page);
  1125. if (npages == 1) {
  1126. *pfn = page_to_pfn(page[0]);
  1127. if (writable)
  1128. *writable = true;
  1129. return true;
  1130. }
  1131. return false;
  1132. }
  1133. /*
  1134. * The slow path to get the pfn of the specified host virtual address;
  1135. * 1 indicates success, -errno is returned if error is detected.
  1136. */
  1137. static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
  1138. bool *writable, kvm_pfn_t *pfn)
  1139. {
  1140. struct page *page[1];
  1141. int npages = 0;
  1142. might_sleep();
  1143. if (writable)
  1144. *writable = write_fault;
  1145. if (async) {
  1146. down_read(&current->mm->mmap_sem);
  1147. npages = get_user_page_nowait(addr, write_fault, page);
  1148. up_read(&current->mm->mmap_sem);
  1149. } else {
  1150. unsigned int flags = FOLL_HWPOISON;
  1151. if (write_fault)
  1152. flags |= FOLL_WRITE;
  1153. npages = get_user_pages_unlocked(addr, 1, page, flags);
  1154. }
  1155. if (npages != 1)
  1156. return npages;
  1157. /* map read fault as writable if possible */
  1158. if (unlikely(!write_fault) && writable) {
  1159. struct page *wpage[1];
  1160. npages = __get_user_pages_fast(addr, 1, 1, wpage);
  1161. if (npages == 1) {
  1162. *writable = true;
  1163. put_page(page[0]);
  1164. page[0] = wpage[0];
  1165. }
  1166. npages = 1;
  1167. }
  1168. *pfn = page_to_pfn(page[0]);
  1169. return npages;
  1170. }
  1171. static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
  1172. {
  1173. if (unlikely(!(vma->vm_flags & VM_READ)))
  1174. return false;
  1175. if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
  1176. return false;
  1177. return true;
  1178. }
  1179. static int hva_to_pfn_remapped(struct vm_area_struct *vma,
  1180. unsigned long addr, bool *async,
  1181. bool write_fault, kvm_pfn_t *p_pfn)
  1182. {
  1183. unsigned long pfn;
  1184. int r;
  1185. r = follow_pfn(vma, addr, &pfn);
  1186. if (r) {
  1187. /*
  1188. * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
  1189. * not call the fault handler, so do it here.
  1190. */
  1191. bool unlocked = false;
  1192. r = fixup_user_fault(current, current->mm, addr,
  1193. (write_fault ? FAULT_FLAG_WRITE : 0),
  1194. &unlocked);
  1195. if (unlocked)
  1196. return -EAGAIN;
  1197. if (r)
  1198. return r;
  1199. r = follow_pfn(vma, addr, &pfn);
  1200. if (r)
  1201. return r;
  1202. }
  1203. /*
  1204. * Get a reference here because callers of *hva_to_pfn* and
  1205. * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
  1206. * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
  1207. * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
  1208. * simply do nothing for reserved pfns.
  1209. *
  1210. * Whoever called remap_pfn_range is also going to call e.g.
  1211. * unmap_mapping_range before the underlying pages are freed,
  1212. * causing a call to our MMU notifier.
  1213. */
  1214. kvm_get_pfn(pfn);
  1215. *p_pfn = pfn;
  1216. return 0;
  1217. }
/*
 * Pin a guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: whether we are in an atomic context and therefore must not sleep
 * @async: whether this function may wait for IO to complete if the
 * host page is not in memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether it is allowed to map a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable; @writable then tells the caller
 * whether the mapping is writable.
 *
 * An illustrative usage sketch follows this function.
 */
  1232. static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
  1233. bool write_fault, bool *writable)
  1234. {
  1235. struct vm_area_struct *vma;
  1236. kvm_pfn_t pfn = 0;
  1237. int npages, r;
  1238. /* we can do it either atomically or asynchronously, not both */
  1239. BUG_ON(atomic && async);
  1240. if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
  1241. return pfn;
  1242. if (atomic)
  1243. return KVM_PFN_ERR_FAULT;
  1244. npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
  1245. if (npages == 1)
  1246. return pfn;
  1247. down_read(&current->mm->mmap_sem);
  1248. if (npages == -EHWPOISON ||
  1249. (!async && check_user_page_hwpoison(addr))) {
  1250. pfn = KVM_PFN_ERR_HWPOISON;
  1251. goto exit;
  1252. }
  1253. retry:
  1254. vma = find_vma_intersection(current->mm, addr, addr + 1);
  1255. if (vma == NULL)
  1256. pfn = KVM_PFN_ERR_FAULT;
  1257. else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
  1258. r = hva_to_pfn_remapped(vma, addr, async, write_fault, &pfn);
  1259. if (r == -EAGAIN)
  1260. goto retry;
  1261. if (r < 0)
  1262. pfn = KVM_PFN_ERR_FAULT;
  1263. } else {
  1264. if (async && vma_is_valid(vma, write_fault))
  1265. *async = true;
  1266. pfn = KVM_PFN_ERR_FAULT;
  1267. }
  1268. exit:
  1269. up_read(&current->mm->mmap_sem);
  1270. return pfn;
  1271. }
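/*
 * Illustrative sketch (annotation, not part of the original file): how the
 * @write_fault/@writable combinations documented above are typically used
 * by a hypothetical caller; the variable names and values are assumptions
 * made purely for illustration.
 *
 *	kvm_pfn_t pfn;
 *	bool writable;
 *
 *	// Case 1: a write fault; the page must be mapped writable.
 *	pfn = hva_to_pfn(addr, false, NULL, true, NULL);
 *
 *	// Case 2: a read fault that opportunistically maps writable;
 *	// @writable reports whether the mapping ended up writable.
 *	pfn = hva_to_pfn(addr, false, NULL, false, &writable);
 */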
  1272. kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
  1273. bool atomic, bool *async, bool write_fault,
  1274. bool *writable)
  1275. {
  1276. unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
  1277. if (addr == KVM_HVA_ERR_RO_BAD) {
  1278. if (writable)
  1279. *writable = false;
  1280. return KVM_PFN_ERR_RO_FAULT;
  1281. }
  1282. if (kvm_is_error_hva(addr)) {
  1283. if (writable)
  1284. *writable = false;
  1285. return KVM_PFN_NOSLOT;
  1286. }
/* Do not map a writable pfn in a read-only memslot. */
  1288. if (writable && memslot_is_readonly(slot)) {
  1289. *writable = false;
  1290. writable = NULL;
  1291. }
  1292. return hva_to_pfn(addr, atomic, async, write_fault,
  1293. writable);
  1294. }
  1295. EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
  1296. kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
  1297. bool *writable)
  1298. {
  1299. return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
  1300. write_fault, writable);
  1301. }
  1302. EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
  1303. kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
  1304. {
  1305. return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
  1306. }
  1307. EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
  1308. kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
  1309. {
  1310. return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
  1311. }
  1312. EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
  1313. kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
  1314. {
  1315. return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
  1316. }
  1317. EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
  1318. kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
  1319. {
  1320. return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
  1321. }
  1322. EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
  1323. kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
  1324. {
  1325. return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
  1326. }
  1327. EXPORT_SYMBOL_GPL(gfn_to_pfn);
  1328. kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
  1329. {
  1330. return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
  1331. }
  1332. EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
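/*
 * Annotation (not in the original source): the wrappers above all funnel
 * into __gfn_to_pfn_memslot(); the *_atomic variants pass atomic = true,
 * the *_prot variants let the caller choose @write_fault and receive
 * *writable back, and the plain gfn_to_pfn*() variants always request a
 * writable mapping (write_fault = true).
 */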
  1333. int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
  1334. struct page **pages, int nr_pages)
  1335. {
  1336. unsigned long addr;
  1337. gfn_t entry = 0;
  1338. addr = gfn_to_hva_many(slot, gfn, &entry);
  1339. if (kvm_is_error_hva(addr))
  1340. return -1;
  1341. if (entry < nr_pages)
  1342. return 0;
  1343. return __get_user_pages_fast(addr, nr_pages, 1, pages);
  1344. }
  1345. EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
  1346. static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
  1347. {
  1348. if (is_error_noslot_pfn(pfn))
  1349. return KVM_ERR_PTR_BAD_PAGE;
  1350. if (kvm_is_reserved_pfn(pfn)) {
  1351. WARN_ON(1);
  1352. return KVM_ERR_PTR_BAD_PAGE;
  1353. }
  1354. return pfn_to_page(pfn);
  1355. }
  1356. struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
  1357. {
  1358. kvm_pfn_t pfn;
  1359. pfn = gfn_to_pfn(kvm, gfn);
  1360. return kvm_pfn_to_page(pfn);
  1361. }
  1362. EXPORT_SYMBOL_GPL(gfn_to_page);
  1363. struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
  1364. {
  1365. kvm_pfn_t pfn;
  1366. pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
  1367. return kvm_pfn_to_page(pfn);
  1368. }
  1369. EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
  1370. void kvm_release_page_clean(struct page *page)
  1371. {
  1372. WARN_ON(is_error_page(page));
  1373. kvm_release_pfn_clean(page_to_pfn(page));
  1374. }
  1375. EXPORT_SYMBOL_GPL(kvm_release_page_clean);
  1376. void kvm_release_pfn_clean(kvm_pfn_t pfn)
  1377. {
  1378. if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
  1379. put_page(pfn_to_page(pfn));
  1380. }
  1381. EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
  1382. void kvm_release_page_dirty(struct page *page)
  1383. {
  1384. WARN_ON(is_error_page(page));
  1385. kvm_release_pfn_dirty(page_to_pfn(page));
  1386. }
  1387. EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
  1388. void kvm_release_pfn_dirty(kvm_pfn_t pfn)
  1389. {
  1390. kvm_set_pfn_dirty(pfn);
  1391. kvm_release_pfn_clean(pfn);
  1392. }
  1393. EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
  1394. void kvm_set_pfn_dirty(kvm_pfn_t pfn)
  1395. {
  1396. if (!kvm_is_reserved_pfn(pfn)) {
  1397. struct page *page = pfn_to_page(pfn);
  1398. if (!PageReserved(page))
  1399. SetPageDirty(page);
  1400. }
  1401. }
  1402. EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
  1403. void kvm_set_pfn_accessed(kvm_pfn_t pfn)
  1404. {
  1405. if (!kvm_is_reserved_pfn(pfn))
  1406. mark_page_accessed(pfn_to_page(pfn));
  1407. }
  1408. EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
  1409. void kvm_get_pfn(kvm_pfn_t pfn)
  1410. {
  1411. if (!kvm_is_reserved_pfn(pfn))
  1412. get_page(pfn_to_page(pfn));
  1413. }
  1414. EXPORT_SYMBOL_GPL(kvm_get_pfn);
  1415. static int next_segment(unsigned long len, int offset)
  1416. {
  1417. if (len > PAGE_SIZE - offset)
  1418. return PAGE_SIZE - offset;
  1419. else
  1420. return len;
  1421. }
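/*
 * Illustrative example (annotation, not in the original source): with a
 * 4 KiB PAGE_SIZE, an access of len = 200 bytes starting at page offset
 * 4000 is split by next_segment() into a 96-byte segment on the first
 * page followed by a 104-byte segment at offset 0 of the next page,
 * which is exactly how the kvm_read_guest()/kvm_write_guest() loops
 * below walk a gpa range that crosses page boundaries.
 */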
  1422. static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
  1423. void *data, int offset, int len)
  1424. {
  1425. int r;
  1426. unsigned long addr;
  1427. addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
  1428. if (kvm_is_error_hva(addr))
  1429. return -EFAULT;
  1430. r = __copy_from_user(data, (void __user *)addr + offset, len);
  1431. if (r)
  1432. return -EFAULT;
  1433. return 0;
  1434. }
  1435. int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
  1436. int len)
  1437. {
  1438. struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
  1439. return __kvm_read_guest_page(slot, gfn, data, offset, len);
  1440. }
  1441. EXPORT_SYMBOL_GPL(kvm_read_guest_page);
  1442. int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
  1443. int offset, int len)
  1444. {
  1445. struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
  1446. return __kvm_read_guest_page(slot, gfn, data, offset, len);
  1447. }
  1448. EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
  1449. int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
  1450. {
  1451. gfn_t gfn = gpa >> PAGE_SHIFT;
  1452. int seg;
  1453. int offset = offset_in_page(gpa);
  1454. int ret;
  1455. while ((seg = next_segment(len, offset)) != 0) {
  1456. ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
  1457. if (ret < 0)
  1458. return ret;
  1459. offset = 0;
  1460. len -= seg;
  1461. data += seg;
  1462. ++gfn;
  1463. }
  1464. return 0;
  1465. }
  1466. EXPORT_SYMBOL_GPL(kvm_read_guest);
  1467. int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
  1468. {
  1469. gfn_t gfn = gpa >> PAGE_SHIFT;
  1470. int seg;
  1471. int offset = offset_in_page(gpa);
  1472. int ret;
  1473. while ((seg = next_segment(len, offset)) != 0) {
  1474. ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
  1475. if (ret < 0)
  1476. return ret;
  1477. offset = 0;
  1478. len -= seg;
  1479. data += seg;
  1480. ++gfn;
  1481. }
  1482. return 0;
  1483. }
  1484. EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
  1485. static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
  1486. void *data, int offset, unsigned long len)
  1487. {
  1488. int r;
  1489. unsigned long addr;
  1490. addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
  1491. if (kvm_is_error_hva(addr))
  1492. return -EFAULT;
  1493. pagefault_disable();
  1494. r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
  1495. pagefault_enable();
  1496. if (r)
  1497. return -EFAULT;
  1498. return 0;
  1499. }
  1500. int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
  1501. unsigned long len)
  1502. {
  1503. gfn_t gfn = gpa >> PAGE_SHIFT;
  1504. struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
  1505. int offset = offset_in_page(gpa);
  1506. return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
  1507. }
  1508. EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);
  1509. int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
  1510. void *data, unsigned long len)
  1511. {
  1512. gfn_t gfn = gpa >> PAGE_SHIFT;
  1513. struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
  1514. int offset = offset_in_page(gpa);
  1515. return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
  1516. }
  1517. EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
  1518. static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
  1519. const void *data, int offset, int len)
  1520. {
  1521. int r;
  1522. unsigned long addr;
  1523. addr = gfn_to_hva_memslot(memslot, gfn);
  1524. if (kvm_is_error_hva(addr))
  1525. return -EFAULT;
  1526. r = __copy_to_user((void __user *)addr + offset, data, len);
  1527. if (r)
  1528. return -EFAULT;
  1529. mark_page_dirty_in_slot(memslot, gfn);
  1530. return 0;
  1531. }
  1532. int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
  1533. const void *data, int offset, int len)
  1534. {
  1535. struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
  1536. return __kvm_write_guest_page(slot, gfn, data, offset, len);
  1537. }
  1538. EXPORT_SYMBOL_GPL(kvm_write_guest_page);
  1539. int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
  1540. const void *data, int offset, int len)
  1541. {
  1542. struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
  1543. return __kvm_write_guest_page(slot, gfn, data, offset, len);
  1544. }
  1545. EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
  1546. int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
  1547. unsigned long len)
  1548. {
  1549. gfn_t gfn = gpa >> PAGE_SHIFT;
  1550. int seg;
  1551. int offset = offset_in_page(gpa);
  1552. int ret;
  1553. while ((seg = next_segment(len, offset)) != 0) {
  1554. ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
  1555. if (ret < 0)
  1556. return ret;
  1557. offset = 0;
  1558. len -= seg;
  1559. data += seg;
  1560. ++gfn;
  1561. }
  1562. return 0;
  1563. }
  1564. EXPORT_SYMBOL_GPL(kvm_write_guest);
  1565. int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
  1566. unsigned long len)
  1567. {
  1568. gfn_t gfn = gpa >> PAGE_SHIFT;
  1569. int seg;
  1570. int offset = offset_in_page(gpa);
  1571. int ret;
  1572. while ((seg = next_segment(len, offset)) != 0) {
  1573. ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
  1574. if (ret < 0)
  1575. return ret;
  1576. offset = 0;
  1577. len -= seg;
  1578. data += seg;
  1579. ++gfn;
  1580. }
  1581. return 0;
  1582. }
  1583. EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
  1584. static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
  1585. struct gfn_to_hva_cache *ghc,
  1586. gpa_t gpa, unsigned long len)
  1587. {
  1588. int offset = offset_in_page(gpa);
  1589. gfn_t start_gfn = gpa >> PAGE_SHIFT;
  1590. gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
  1591. gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
  1592. gfn_t nr_pages_avail;
  1593. ghc->gpa = gpa;
  1594. ghc->generation = slots->generation;
  1595. ghc->len = len;
  1596. ghc->memslot = __gfn_to_memslot(slots, start_gfn);
  1597. ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
  1598. if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
  1599. ghc->hva += offset;
  1600. } else {
  1601. /*
  1602. * If the requested region crosses two memslots, we still
  1603. * verify that the entire region is valid here.
  1604. */
  1605. while (start_gfn <= end_gfn) {
  1606. nr_pages_avail = 0;
  1607. ghc->memslot = __gfn_to_memslot(slots, start_gfn);
  1608. ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
  1609. &nr_pages_avail);
  1610. if (kvm_is_error_hva(ghc->hva))
  1611. return -EFAULT;
  1612. start_gfn += nr_pages_avail;
  1613. }
  1614. /* Use the slow path for cross page reads and writes. */
  1615. ghc->memslot = NULL;
  1616. }
  1617. return 0;
  1618. }
  1619. int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1620. gpa_t gpa, unsigned long len)
  1621. {
  1622. struct kvm_memslots *slots = kvm_memslots(kvm);
  1623. return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
  1624. }
  1625. EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
  1626. int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1627. void *data, int offset, unsigned long len)
  1628. {
  1629. struct kvm_memslots *slots = kvm_memslots(kvm);
  1630. int r;
  1631. gpa_t gpa = ghc->gpa + offset;
  1632. BUG_ON(len + offset > ghc->len);
  1633. if (slots->generation != ghc->generation)
  1634. __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
  1635. if (unlikely(!ghc->memslot))
  1636. return kvm_write_guest(kvm, gpa, data, len);
  1637. if (kvm_is_error_hva(ghc->hva))
  1638. return -EFAULT;
  1639. r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
  1640. if (r)
  1641. return -EFAULT;
  1642. mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
  1643. return 0;
  1644. }
  1645. EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
  1646. int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1647. void *data, unsigned long len)
  1648. {
  1649. return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
  1650. }
  1651. EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
  1652. int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1653. void *data, unsigned long len)
  1654. {
  1655. struct kvm_memslots *slots = kvm_memslots(kvm);
  1656. int r;
  1657. BUG_ON(len > ghc->len);
  1658. if (slots->generation != ghc->generation)
  1659. __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
  1660. if (unlikely(!ghc->memslot))
  1661. return kvm_read_guest(kvm, ghc->gpa, data, len);
  1662. if (kvm_is_error_hva(ghc->hva))
  1663. return -EFAULT;
  1664. r = __copy_from_user(data, (void __user *)ghc->hva, len);
  1665. if (r)
  1666. return -EFAULT;
  1667. return 0;
  1668. }
  1669. EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
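/*
 * Illustrative usage sketch (annotation, not in the original source) for
 * the gfn_to_hva_cache helpers above; the gpa, length and local variable
 * names of the caller are assumptions made for illustration only:
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 value = 0;
 *
 *	// Resolve the gpa -> hva translation once, up front.
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(value)))
 *		return -EFAULT;
 *
 *	// Hot-path accesses then reuse the cached hva as long as the
 *	// memslot generation has not changed; cross-page regions fall
 *	// back to kvm_read_guest()/kvm_write_guest() automatically.
 *	kvm_write_guest_cached(kvm, &ghc, &value, sizeof(value));
 *	kvm_read_guest_cached(kvm, &ghc, &value, sizeof(value));
 */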
  1670. int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
  1671. {
  1672. const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
  1673. return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
  1674. }
  1675. EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
  1676. int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
  1677. {
  1678. gfn_t gfn = gpa >> PAGE_SHIFT;
  1679. int seg;
  1680. int offset = offset_in_page(gpa);
  1681. int ret;
  1682. while ((seg = next_segment(len, offset)) != 0) {
  1683. ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
  1684. if (ret < 0)
  1685. return ret;
  1686. offset = 0;
  1687. len -= seg;
  1688. ++gfn;
  1689. }
  1690. return 0;
  1691. }
  1692. EXPORT_SYMBOL_GPL(kvm_clear_guest);
  1693. static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
  1694. gfn_t gfn)
  1695. {
  1696. if (memslot && memslot->dirty_bitmap) {
  1697. unsigned long rel_gfn = gfn - memslot->base_gfn;
  1698. set_bit_le(rel_gfn, memslot->dirty_bitmap);
  1699. }
  1700. }
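/*
 * Illustrative example (annotation, not in the original source): for a
 * memslot with base_gfn = 0x100, dirtying gfn 0x105 sets bit
 * rel_gfn = 5 in the slot's dirty_bitmap via set_bit_le(); the
 * KVM_GET_DIRTY_LOG ioctl later copies that bitmap out to userspace.
 */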
  1701. void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
  1702. {
  1703. struct kvm_memory_slot *memslot;
  1704. memslot = gfn_to_memslot(kvm, gfn);
  1705. mark_page_dirty_in_slot(memslot, gfn);
  1706. }
  1707. EXPORT_SYMBOL_GPL(mark_page_dirty);
  1708. void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
  1709. {
  1710. struct kvm_memory_slot *memslot;
  1711. memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
  1712. mark_page_dirty_in_slot(memslot, gfn);
  1713. }
  1714. EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
  1715. static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
  1716. {
  1717. unsigned int old, val, grow;
  1718. old = val = vcpu->halt_poll_ns;
  1719. grow = READ_ONCE(halt_poll_ns_grow);
  1720. /* 10us base */
  1721. if (val == 0 && grow)
  1722. val = 10000;
  1723. else
  1724. val *= grow;
  1725. if (val > halt_poll_ns)
  1726. val = halt_poll_ns;
  1727. vcpu->halt_poll_ns = val;
  1728. trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
  1729. }
  1730. static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
  1731. {
  1732. unsigned int old, val, shrink;
  1733. old = val = vcpu->halt_poll_ns;
  1734. shrink = READ_ONCE(halt_poll_ns_shrink);
  1735. if (shrink == 0)
  1736. val = 0;
  1737. else
  1738. val /= shrink;
  1739. vcpu->halt_poll_ns = val;
  1740. trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
  1741. }
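/*
 * Illustrative example (annotation, not in the original source), assuming
 * halt_poll_ns_grow = 2, halt_poll_ns_shrink = 2 and halt_poll_ns = 200000;
 * these exact values are assumptions for the example, not documented
 * defaults. A vcpu's halt_poll_ns then grows
 * 0 -> 10000 -> 20000 -> 40000 -> ... and is clamped at 200000, while each
 * shrink step halves it; a shrink factor of 0 drops it straight to 0.
 */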
  1742. static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
  1743. {
  1744. if (kvm_arch_vcpu_runnable(vcpu)) {
  1745. kvm_make_request(KVM_REQ_UNHALT, vcpu);
  1746. return -EINTR;
  1747. }
  1748. if (kvm_cpu_has_pending_timer(vcpu))
  1749. return -EINTR;
  1750. if (signal_pending(current))
  1751. return -EINTR;
  1752. return 0;
  1753. }
  1754. /*
  1755. * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  1756. */
  1757. void kvm_vcpu_block(struct kvm_vcpu *vcpu)
  1758. {
  1759. ktime_t start, cur;
  1760. DECLARE_SWAITQUEUE(wait);
  1761. bool waited = false;
  1762. u64 block_ns;
  1763. start = cur = ktime_get();
  1764. if (vcpu->halt_poll_ns) {
  1765. ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
  1766. ++vcpu->stat.halt_attempted_poll;
  1767. do {
  1768. /*
  1769. * This sets KVM_REQ_UNHALT if an interrupt
  1770. * arrives.
  1771. */
  1772. if (kvm_vcpu_check_block(vcpu) < 0) {
  1773. ++vcpu->stat.halt_successful_poll;
  1774. if (!vcpu_valid_wakeup(vcpu))
  1775. ++vcpu->stat.halt_poll_invalid;
  1776. goto out;
  1777. }
  1778. cur = ktime_get();
  1779. } while (single_task_running() && ktime_before(cur, stop));
  1780. }
  1781. kvm_arch_vcpu_blocking(vcpu);
  1782. for (;;) {
  1783. prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
  1784. if (kvm_vcpu_check_block(vcpu) < 0)
  1785. break;
  1786. waited = true;
  1787. schedule();
  1788. }
  1789. finish_swait(&vcpu->wq, &wait);
  1790. cur = ktime_get();
  1791. kvm_arch_vcpu_unblocking(vcpu);
  1792. out:
  1793. block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
  1794. if (!vcpu_valid_wakeup(vcpu))
  1795. shrink_halt_poll_ns(vcpu);
  1796. else if (halt_poll_ns) {
  1797. if (block_ns <= vcpu->halt_poll_ns)
  1798. ;
  1799. /* we had a long block, shrink polling */
  1800. else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
  1801. shrink_halt_poll_ns(vcpu);
  1802. /* we had a short halt and our poll time is too small */
  1803. else if (vcpu->halt_poll_ns < halt_poll_ns &&
  1804. block_ns < halt_poll_ns)
  1805. grow_halt_poll_ns(vcpu);
  1806. } else
  1807. vcpu->halt_poll_ns = 0;
  1808. trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
  1809. kvm_arch_vcpu_block_finish(vcpu);
  1810. }
  1811. EXPORT_SYMBOL_GPL(kvm_vcpu_block);
  1812. bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
  1813. {
  1814. struct swait_queue_head *wqp;
  1815. wqp = kvm_arch_vcpu_wq(vcpu);
  1816. if (swq_has_sleeper(wqp)) {
  1817. swake_up(wqp);
  1818. ++vcpu->stat.halt_wakeup;
  1819. return true;
  1820. }
  1821. return false;
  1822. }
  1823. EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
  1824. #ifndef CONFIG_S390
  1825. /*
  1826. * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
  1827. */
  1828. void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
  1829. {
  1830. int me;
  1831. int cpu = vcpu->cpu;
  1832. if (kvm_vcpu_wake_up(vcpu))
  1833. return;
  1834. me = get_cpu();
  1835. if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
  1836. if (kvm_arch_vcpu_should_kick(vcpu))
  1837. smp_send_reschedule(cpu);
  1838. put_cpu();
  1839. }
  1840. EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
  1841. #endif /* !CONFIG_S390 */
  1842. int kvm_vcpu_yield_to(struct kvm_vcpu *target)
  1843. {
  1844. struct pid *pid;
  1845. struct task_struct *task = NULL;
  1846. int ret = 0;
  1847. rcu_read_lock();
  1848. pid = rcu_dereference(target->pid);
  1849. if (pid)
  1850. task = get_pid_task(pid, PIDTYPE_PID);
  1851. rcu_read_unlock();
  1852. if (!task)
  1853. return ret;
  1854. ret = yield_to(task, 1);
  1855. put_task_struct(task);
  1856. return ret;
  1857. }
  1858. EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
/*
 * Helper that checks whether a VCPU is eligible for directed yield.
 * The most eligible candidate to yield to is decided by the following
 * heuristics:
 *
 * (a) A VCPU which has not done a PLE exit or had cpu-relax intercepted
 * recently (a preempted lock holder), indicated by @in_spin_loop.
 * Set at the beginning and cleared at the end of the interception/PLE
 * handler.
 *
 * (b) A VCPU which has done a PLE exit/cpu-relax interception but did not
 * get a chance last time (it has mostly become eligible now, since we have
 * probably yielded to the lock holder in the last iteration). This is done
 * by toggling @dy_eligible each time a VCPU is checked for eligibility.
 *
 * Yielding to a recently PLE-exited/cpu-relax-intercepted VCPU before
 * yielding to a preempted lock holder could result in wrong VCPU selection
 * and CPU burning. Giving priority to a potential lock holder increases
 * lock progress.
 *
 * Since the algorithm is based on heuristics, accessing another VCPU's
 * data without locking does not harm. It may result in trying to yield to
 * the same VCPU, failing, and continuing with the next VCPU, and so on.
 */
  1881. static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
  1882. {
  1883. #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
  1884. bool eligible;
  1885. eligible = !vcpu->spin_loop.in_spin_loop ||
  1886. vcpu->spin_loop.dy_eligible;
  1887. if (vcpu->spin_loop.in_spin_loop)
  1888. kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
  1889. return eligible;
  1890. #else
  1891. return true;
  1892. #endif
  1893. }
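/*
 * Illustrative trace (annotation, not in the original source), assuming a
 * candidate vcpu starts with dy_eligible = false: if that candidate is
 * itself in a spin loop (in_spin_loop = true), the first eligibility check
 * returns false and flips dy_eligible to true, the next check returns true
 * and flips it back, so a spinning vcpu is skipped as a yield target
 * roughly every other directed-yield attempt.
 */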
  1894. void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
  1895. {
  1896. struct kvm *kvm = me->kvm;
  1897. struct kvm_vcpu *vcpu;
  1898. int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
  1899. int yielded = 0;
  1900. int try = 3;
  1901. int pass;
  1902. int i;
  1903. kvm_vcpu_set_in_spin_loop(me, true);
  1904. /*
  1905. * We boost the priority of a VCPU that is runnable but not
  1906. * currently running, because it got preempted by something
  1907. * else and called schedule in __vcpu_run. Hopefully that
  1908. * VCPU is holding the lock that we need and will release it.
  1909. * We approximate round-robin by starting at the last boosted VCPU.
  1910. */
  1911. for (pass = 0; pass < 2 && !yielded && try; pass++) {
  1912. kvm_for_each_vcpu(i, vcpu, kvm) {
  1913. if (!pass && i <= last_boosted_vcpu) {
  1914. i = last_boosted_vcpu;
  1915. continue;
  1916. } else if (pass && i > last_boosted_vcpu)
  1917. break;
  1918. if (!READ_ONCE(vcpu->preempted))
  1919. continue;
  1920. if (vcpu == me)
  1921. continue;
  1922. if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
  1923. continue;
  1924. if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
  1925. continue;
  1926. if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
  1927. continue;
  1928. yielded = kvm_vcpu_yield_to(vcpu);
  1929. if (yielded > 0) {
  1930. kvm->last_boosted_vcpu = i;
  1931. break;
  1932. } else if (yielded < 0) {
  1933. try--;
  1934. if (!try)
  1935. break;
  1936. }
  1937. }
  1938. }
  1939. kvm_vcpu_set_in_spin_loop(me, false);
  1940. /* Ensure vcpu is not eligible during next spinloop */
  1941. kvm_vcpu_set_dy_eligible(me, false);
  1942. }
  1943. EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
  1944. static int kvm_vcpu_fault(struct vm_fault *vmf)
  1945. {
  1946. struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
  1947. struct page *page;
  1948. if (vmf->pgoff == 0)
  1949. page = virt_to_page(vcpu->run);
  1950. #ifdef CONFIG_X86
  1951. else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
  1952. page = virt_to_page(vcpu->arch.pio_data);
  1953. #endif
  1954. #ifdef CONFIG_KVM_MMIO
  1955. else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
  1956. page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
  1957. #endif
  1958. else
  1959. return kvm_arch_vcpu_fault(vcpu, vmf);
  1960. get_page(page);
  1961. vmf->page = page;
  1962. return 0;
  1963. }
  1964. static const struct vm_operations_struct kvm_vcpu_vm_ops = {
  1965. .fault = kvm_vcpu_fault,
  1966. };
  1967. static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
  1968. {
  1969. vma->vm_ops = &kvm_vcpu_vm_ops;
  1970. return 0;
  1971. }
  1972. static int kvm_vcpu_release(struct inode *inode, struct file *filp)
  1973. {
  1974. struct kvm_vcpu *vcpu = filp->private_data;
  1975. debugfs_remove_recursive(vcpu->debugfs_dentry);
  1976. kvm_put_kvm(vcpu->kvm);
  1977. return 0;
  1978. }
  1979. static struct file_operations kvm_vcpu_fops = {
  1980. .release = kvm_vcpu_release,
  1981. .unlocked_ioctl = kvm_vcpu_ioctl,
  1982. #ifdef CONFIG_KVM_COMPAT
  1983. .compat_ioctl = kvm_vcpu_compat_ioctl,
  1984. #endif
  1985. .mmap = kvm_vcpu_mmap,
  1986. .llseek = noop_llseek,
  1987. };
  1988. /*
  1989. * Allocates an inode for the vcpu.
  1990. */
  1991. static int create_vcpu_fd(struct kvm_vcpu *vcpu)
  1992. {
  1993. return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
  1994. }
  1995. static int kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
  1996. {
  1997. char dir_name[ITOA_MAX_LEN * 2];
  1998. int ret;
  1999. if (!kvm_arch_has_vcpu_debugfs())
  2000. return 0;
  2001. if (!debugfs_initialized())
  2002. return 0;
  2003. snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
  2004. vcpu->debugfs_dentry = debugfs_create_dir(dir_name,
  2005. vcpu->kvm->debugfs_dentry);
  2006. if (!vcpu->debugfs_dentry)
  2007. return -ENOMEM;
  2008. ret = kvm_arch_create_vcpu_debugfs(vcpu);
  2009. if (ret < 0) {
  2010. debugfs_remove_recursive(vcpu->debugfs_dentry);
  2011. return ret;
  2012. }
  2013. return 0;
  2014. }
  2015. /*
  2016. * Creates some virtual cpus. Good luck creating more than one.
  2017. */
  2018. static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
  2019. {
  2020. int r;
  2021. struct kvm_vcpu *vcpu;
  2022. if (id >= KVM_MAX_VCPU_ID)
  2023. return -EINVAL;
  2024. mutex_lock(&kvm->lock);
  2025. if (kvm->created_vcpus == KVM_MAX_VCPUS) {
  2026. mutex_unlock(&kvm->lock);
  2027. return -EINVAL;
  2028. }
  2029. kvm->created_vcpus++;
  2030. mutex_unlock(&kvm->lock);
  2031. vcpu = kvm_arch_vcpu_create(kvm, id);
  2032. if (IS_ERR(vcpu)) {
  2033. r = PTR_ERR(vcpu);
  2034. goto vcpu_decrement;
  2035. }
  2036. preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
  2037. r = kvm_arch_vcpu_setup(vcpu);
  2038. if (r)
  2039. goto vcpu_destroy;
  2040. r = kvm_create_vcpu_debugfs(vcpu);
  2041. if (r)
  2042. goto vcpu_destroy;
  2043. mutex_lock(&kvm->lock);
  2044. if (kvm_get_vcpu_by_id(kvm, id)) {
  2045. r = -EEXIST;
  2046. goto unlock_vcpu_destroy;
  2047. }
  2048. BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
  2049. /* Now it's all set up, let userspace reach it */
  2050. kvm_get_kvm(kvm);
  2051. r = create_vcpu_fd(vcpu);
  2052. if (r < 0) {
  2053. kvm_put_kvm(kvm);
  2054. goto unlock_vcpu_destroy;
  2055. }
  2056. kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
/*
 * Pairs with smp_rmb() in kvm_get_vcpu.  Store kvm->vcpus[]
 * before the incremented value of kvm->online_vcpus becomes visible.
 */
  2061. smp_wmb();
  2062. atomic_inc(&kvm->online_vcpus);
  2063. mutex_unlock(&kvm->lock);
  2064. kvm_arch_vcpu_postcreate(vcpu);
  2065. return r;
  2066. unlock_vcpu_destroy:
  2067. mutex_unlock(&kvm->lock);
  2068. debugfs_remove_recursive(vcpu->debugfs_dentry);
  2069. vcpu_destroy:
  2070. kvm_arch_vcpu_destroy(vcpu);
  2071. vcpu_decrement:
  2072. mutex_lock(&kvm->lock);
  2073. kvm->created_vcpus--;
  2074. mutex_unlock(&kvm->lock);
  2075. return r;
  2076. }
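/*
 * Illustrative userspace sketch (annotation, not in the original source)
 * showing how the vcpu fd created above is typically used; vm_fd and
 * kvm_fd are assumed to be already-open file descriptors and error
 * handling is omitted:
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);	// vcpu id 0
 *	long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	// run->exit_reason then describes why the guest exited.
 */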
  2077. static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
  2078. {
  2079. if (sigset) {
  2080. sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
  2081. vcpu->sigset_active = 1;
  2082. vcpu->sigset = *sigset;
  2083. } else
  2084. vcpu->sigset_active = 0;
  2085. return 0;
  2086. }
  2087. static long kvm_vcpu_ioctl(struct file *filp,
  2088. unsigned int ioctl, unsigned long arg)
  2089. {
  2090. struct kvm_vcpu *vcpu = filp->private_data;
  2091. void __user *argp = (void __user *)arg;
  2092. int r;
  2093. struct kvm_fpu *fpu = NULL;
  2094. struct kvm_sregs *kvm_sregs = NULL;
  2095. if (vcpu->kvm->mm != current->mm)
  2096. return -EIO;
  2097. if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
  2098. return -EINVAL;
  2099. #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
/*
 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
 * so vcpu_load() would break them.
 */
  2104. if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT)
  2105. return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
  2106. #endif
  2107. r = vcpu_load(vcpu);
  2108. if (r)
  2109. return r;
  2110. switch (ioctl) {
  2111. case KVM_RUN: {
  2112. struct pid *oldpid;
  2113. r = -EINVAL;
  2114. if (arg)
  2115. goto out;
  2116. oldpid = rcu_access_pointer(vcpu->pid);
  2117. if (unlikely(oldpid != current->pids[PIDTYPE_PID].pid)) {
  2118. /* The thread running this VCPU changed. */
  2119. struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
  2120. rcu_assign_pointer(vcpu->pid, newpid);
  2121. if (oldpid)
  2122. synchronize_rcu();
  2123. put_pid(oldpid);
  2124. }
  2125. r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
  2126. trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
  2127. break;
  2128. }
  2129. case KVM_GET_REGS: {
  2130. struct kvm_regs *kvm_regs;
  2131. r = -ENOMEM;
  2132. kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
  2133. if (!kvm_regs)
  2134. goto out;
  2135. r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
  2136. if (r)
  2137. goto out_free1;
  2138. r = -EFAULT;
  2139. if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
  2140. goto out_free1;
  2141. r = 0;
  2142. out_free1:
  2143. kfree(kvm_regs);
  2144. break;
  2145. }
  2146. case KVM_SET_REGS: {
  2147. struct kvm_regs *kvm_regs;
  2148. r = -ENOMEM;
  2149. kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
  2150. if (IS_ERR(kvm_regs)) {
  2151. r = PTR_ERR(kvm_regs);
  2152. goto out;
  2153. }
  2154. r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
  2155. kfree(kvm_regs);
  2156. break;
  2157. }
  2158. case KVM_GET_SREGS: {
  2159. kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
  2160. r = -ENOMEM;
  2161. if (!kvm_sregs)
  2162. goto out;
  2163. r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
  2164. if (r)
  2165. goto out;
  2166. r = -EFAULT;
  2167. if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
  2168. goto out;
  2169. r = 0;
  2170. break;
  2171. }
  2172. case KVM_SET_SREGS: {
  2173. kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
  2174. if (IS_ERR(kvm_sregs)) {
  2175. r = PTR_ERR(kvm_sregs);
  2176. kvm_sregs = NULL;
  2177. goto out;
  2178. }
  2179. r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
  2180. break;
  2181. }
  2182. case KVM_GET_MP_STATE: {
  2183. struct kvm_mp_state mp_state;
  2184. r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
  2185. if (r)
  2186. goto out;
  2187. r = -EFAULT;
  2188. if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
  2189. goto out;
  2190. r = 0;
  2191. break;
  2192. }
  2193. case KVM_SET_MP_STATE: {
  2194. struct kvm_mp_state mp_state;
  2195. r = -EFAULT;
  2196. if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
  2197. goto out;
  2198. r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
  2199. break;
  2200. }
  2201. case KVM_TRANSLATE: {
  2202. struct kvm_translation tr;
  2203. r = -EFAULT;
  2204. if (copy_from_user(&tr, argp, sizeof(tr)))
  2205. goto out;
  2206. r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
  2207. if (r)
  2208. goto out;
  2209. r = -EFAULT;
  2210. if (copy_to_user(argp, &tr, sizeof(tr)))
  2211. goto out;
  2212. r = 0;
  2213. break;
  2214. }
  2215. case KVM_SET_GUEST_DEBUG: {
  2216. struct kvm_guest_debug dbg;
  2217. r = -EFAULT;
  2218. if (copy_from_user(&dbg, argp, sizeof(dbg)))
  2219. goto out;
  2220. r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
  2221. break;
  2222. }
  2223. case KVM_SET_SIGNAL_MASK: {
  2224. struct kvm_signal_mask __user *sigmask_arg = argp;
  2225. struct kvm_signal_mask kvm_sigmask;
  2226. sigset_t sigset, *p;
  2227. p = NULL;
  2228. if (argp) {
  2229. r = -EFAULT;
  2230. if (copy_from_user(&kvm_sigmask, argp,
  2231. sizeof(kvm_sigmask)))
  2232. goto out;
  2233. r = -EINVAL;
  2234. if (kvm_sigmask.len != sizeof(sigset))
  2235. goto out;
  2236. r = -EFAULT;
  2237. if (copy_from_user(&sigset, sigmask_arg->sigset,
  2238. sizeof(sigset)))
  2239. goto out;
  2240. p = &sigset;
  2241. }
  2242. r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
  2243. break;
  2244. }
  2245. case KVM_GET_FPU: {
  2246. fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
  2247. r = -ENOMEM;
  2248. if (!fpu)
  2249. goto out;
  2250. r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
  2251. if (r)
  2252. goto out;
  2253. r = -EFAULT;
  2254. if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
  2255. goto out;
  2256. r = 0;
  2257. break;
  2258. }
  2259. case KVM_SET_FPU: {
  2260. fpu = memdup_user(argp, sizeof(*fpu));
  2261. if (IS_ERR(fpu)) {
  2262. r = PTR_ERR(fpu);
  2263. fpu = NULL;
  2264. goto out;
  2265. }
  2266. r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
  2267. break;
  2268. }
  2269. default:
  2270. r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
  2271. }
  2272. out:
  2273. vcpu_put(vcpu);
  2274. kfree(fpu);
  2275. kfree(kvm_sregs);
  2276. return r;
  2277. }
  2278. #ifdef CONFIG_KVM_COMPAT
  2279. static long kvm_vcpu_compat_ioctl(struct file *filp,
  2280. unsigned int ioctl, unsigned long arg)
  2281. {
  2282. struct kvm_vcpu *vcpu = filp->private_data;
  2283. void __user *argp = compat_ptr(arg);
  2284. int r;
  2285. if (vcpu->kvm->mm != current->mm)
  2286. return -EIO;
  2287. switch (ioctl) {
  2288. case KVM_SET_SIGNAL_MASK: {
  2289. struct kvm_signal_mask __user *sigmask_arg = argp;
  2290. struct kvm_signal_mask kvm_sigmask;
  2291. sigset_t sigset;
  2292. if (argp) {
  2293. r = -EFAULT;
  2294. if (copy_from_user(&kvm_sigmask, argp,
  2295. sizeof(kvm_sigmask)))
  2296. goto out;
  2297. r = -EINVAL;
  2298. if (kvm_sigmask.len != sizeof(compat_sigset_t))
  2299. goto out;
  2300. r = -EFAULT;
  2301. if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset))
  2302. goto out;
  2303. r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
  2304. } else
  2305. r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
  2306. break;
  2307. }
  2308. default:
  2309. r = kvm_vcpu_ioctl(filp, ioctl, arg);
  2310. }
  2311. out:
  2312. return r;
  2313. }
  2314. #endif
  2315. static int kvm_device_ioctl_attr(struct kvm_device *dev,
  2316. int (*accessor)(struct kvm_device *dev,
  2317. struct kvm_device_attr *attr),
  2318. unsigned long arg)
  2319. {
  2320. struct kvm_device_attr attr;
  2321. if (!accessor)
  2322. return -EPERM;
  2323. if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
  2324. return -EFAULT;
  2325. return accessor(dev, &attr);
  2326. }
  2327. static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
  2328. unsigned long arg)
  2329. {
  2330. struct kvm_device *dev = filp->private_data;
  2331. switch (ioctl) {
  2332. case KVM_SET_DEVICE_ATTR:
  2333. return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
  2334. case KVM_GET_DEVICE_ATTR:
  2335. return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
  2336. case KVM_HAS_DEVICE_ATTR:
  2337. return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
  2338. default:
  2339. if (dev->ops->ioctl)
  2340. return dev->ops->ioctl(dev, ioctl, arg);
  2341. return -ENOTTY;
  2342. }
  2343. }
  2344. static int kvm_device_release(struct inode *inode, struct file *filp)
  2345. {
  2346. struct kvm_device *dev = filp->private_data;
  2347. struct kvm *kvm = dev->kvm;
  2348. kvm_put_kvm(kvm);
  2349. return 0;
  2350. }
  2351. static const struct file_operations kvm_device_fops = {
  2352. .unlocked_ioctl = kvm_device_ioctl,
  2353. #ifdef CONFIG_KVM_COMPAT
  2354. .compat_ioctl = kvm_device_ioctl,
  2355. #endif
  2356. .release = kvm_device_release,
  2357. };
  2358. struct kvm_device *kvm_device_from_filp(struct file *filp)
  2359. {
  2360. if (filp->f_op != &kvm_device_fops)
  2361. return NULL;
  2362. return filp->private_data;
  2363. }
  2364. static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
  2365. #ifdef CONFIG_KVM_MPIC
  2366. [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
  2367. [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
  2368. #endif
  2369. };
  2370. int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
  2371. {
  2372. if (type >= ARRAY_SIZE(kvm_device_ops_table))
  2373. return -ENOSPC;
  2374. if (kvm_device_ops_table[type] != NULL)
  2375. return -EEXIST;
  2376. kvm_device_ops_table[type] = ops;
  2377. return 0;
  2378. }
  2379. void kvm_unregister_device_ops(u32 type)
  2380. {
  2381. if (kvm_device_ops_table[type] != NULL)
  2382. kvm_device_ops_table[type] = NULL;
  2383. }
  2384. static int kvm_ioctl_create_device(struct kvm *kvm,
  2385. struct kvm_create_device *cd)
  2386. {
  2387. struct kvm_device_ops *ops = NULL;
  2388. struct kvm_device *dev;
  2389. bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
  2390. int ret;
  2391. if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
  2392. return -ENODEV;
  2393. ops = kvm_device_ops_table[cd->type];
  2394. if (ops == NULL)
  2395. return -ENODEV;
  2396. if (test)
  2397. return 0;
  2398. dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  2399. if (!dev)
  2400. return -ENOMEM;
  2401. dev->ops = ops;
  2402. dev->kvm = kvm;
  2403. mutex_lock(&kvm->lock);
  2404. ret = ops->create(dev, cd->type);
  2405. if (ret < 0) {
  2406. mutex_unlock(&kvm->lock);
  2407. kfree(dev);
  2408. return ret;
  2409. }
  2410. list_add(&dev->vm_node, &kvm->devices);
  2411. mutex_unlock(&kvm->lock);
  2412. if (ops->init)
  2413. ops->init(dev);
  2414. ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
  2415. if (ret < 0) {
  2416. mutex_lock(&kvm->lock);
  2417. list_del(&dev->vm_node);
  2418. mutex_unlock(&kvm->lock);
  2419. ops->destroy(dev);
  2420. return ret;
  2421. }
  2422. kvm_get_kvm(kvm);
  2423. cd->fd = ret;
  2424. return 0;
  2425. }
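/*
 * Illustrative userspace sketch (annotation, not in the original source)
 * of the KVM_CREATE_DEVICE path above; vm_fd is assumed to be an open VM
 * file descriptor and error handling is omitted:
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_FSL_MPIC_20,
 *		.flags = KVM_CREATE_DEVICE_TEST,	// only probe for support
 *	};
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0) {
 *		cd.flags = 0;				// really create it
 *		ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	// cd.fd now holds the device fd
 *	}
 */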
  2426. static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
  2427. {
  2428. switch (arg) {
  2429. case KVM_CAP_USER_MEMORY:
  2430. case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
  2431. case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
  2432. case KVM_CAP_INTERNAL_ERROR_DATA:
  2433. #ifdef CONFIG_HAVE_KVM_MSI
  2434. case KVM_CAP_SIGNAL_MSI:
  2435. #endif
  2436. #ifdef CONFIG_HAVE_KVM_IRQFD
  2437. case KVM_CAP_IRQFD:
  2438. case KVM_CAP_IRQFD_RESAMPLE:
  2439. #endif
  2440. case KVM_CAP_IOEVENTFD_ANY_LENGTH:
  2441. case KVM_CAP_CHECK_EXTENSION_VM:
  2442. return 1;
  2443. #ifdef CONFIG_KVM_MMIO
  2444. case KVM_CAP_COALESCED_MMIO:
  2445. return KVM_COALESCED_MMIO_PAGE_OFFSET;
  2446. #endif
  2447. #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
  2448. case KVM_CAP_IRQ_ROUTING:
  2449. return KVM_MAX_IRQ_ROUTES;
  2450. #endif
  2451. #if KVM_ADDRESS_SPACE_NUM > 1
  2452. case KVM_CAP_MULTI_ADDRESS_SPACE:
  2453. return KVM_ADDRESS_SPACE_NUM;
  2454. #endif
  2455. case KVM_CAP_MAX_VCPU_ID:
  2456. return KVM_MAX_VCPU_ID;
  2457. default:
  2458. break;
  2459. }
  2460. return kvm_vm_ioctl_check_extension(kvm, arg);
  2461. }
  2462. static long kvm_vm_ioctl(struct file *filp,
  2463. unsigned int ioctl, unsigned long arg)
  2464. {
  2465. struct kvm *kvm = filp->private_data;
  2466. void __user *argp = (void __user *)arg;
  2467. int r;
  2468. if (kvm->mm != current->mm)
  2469. return -EIO;
  2470. switch (ioctl) {
  2471. case KVM_CREATE_VCPU:
  2472. r = kvm_vm_ioctl_create_vcpu(kvm, arg);
  2473. break;
  2474. case KVM_SET_USER_MEMORY_REGION: {
  2475. struct kvm_userspace_memory_region kvm_userspace_mem;
  2476. r = -EFAULT;
  2477. if (copy_from_user(&kvm_userspace_mem, argp,
  2478. sizeof(kvm_userspace_mem)))
  2479. goto out;
  2480. r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
  2481. break;
  2482. }
  2483. case KVM_GET_DIRTY_LOG: {
  2484. struct kvm_dirty_log log;
  2485. r = -EFAULT;
  2486. if (copy_from_user(&log, argp, sizeof(log)))
  2487. goto out;
  2488. r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
  2489. break;
  2490. }
  2491. #ifdef CONFIG_KVM_MMIO
  2492. case KVM_REGISTER_COALESCED_MMIO: {
  2493. struct kvm_coalesced_mmio_zone zone;
  2494. r = -EFAULT;
  2495. if (copy_from_user(&zone, argp, sizeof(zone)))
  2496. goto out;
  2497. r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
  2498. break;
  2499. }
  2500. case KVM_UNREGISTER_COALESCED_MMIO: {
  2501. struct kvm_coalesced_mmio_zone zone;
  2502. r = -EFAULT;
  2503. if (copy_from_user(&zone, argp, sizeof(zone)))
  2504. goto out;
  2505. r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
  2506. break;
  2507. }
  2508. #endif
  2509. case KVM_IRQFD: {
  2510. struct kvm_irqfd data;
  2511. r = -EFAULT;
  2512. if (copy_from_user(&data, argp, sizeof(data)))
  2513. goto out;
  2514. r = kvm_irqfd(kvm, &data);
  2515. break;
  2516. }
  2517. case KVM_IOEVENTFD: {
  2518. struct kvm_ioeventfd data;
  2519. r = -EFAULT;
  2520. if (copy_from_user(&data, argp, sizeof(data)))
  2521. goto out;
  2522. r = kvm_ioeventfd(kvm, &data);
  2523. break;
  2524. }
  2525. #ifdef CONFIG_HAVE_KVM_MSI
  2526. case KVM_SIGNAL_MSI: {
  2527. struct kvm_msi msi;
  2528. r = -EFAULT;
  2529. if (copy_from_user(&msi, argp, sizeof(msi)))
  2530. goto out;
  2531. r = kvm_send_userspace_msi(kvm, &msi);
  2532. break;
  2533. }
  2534. #endif
  2535. #ifdef __KVM_HAVE_IRQ_LINE
  2536. case KVM_IRQ_LINE_STATUS:
  2537. case KVM_IRQ_LINE: {
  2538. struct kvm_irq_level irq_event;
  2539. r = -EFAULT;
  2540. if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
  2541. goto out;
  2542. r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
  2543. ioctl == KVM_IRQ_LINE_STATUS);
  2544. if (r)
  2545. goto out;
  2546. r = -EFAULT;
  2547. if (ioctl == KVM_IRQ_LINE_STATUS) {
  2548. if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
  2549. goto out;
  2550. }
  2551. r = 0;
  2552. break;
  2553. }
  2554. #endif
  2555. #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
  2556. case KVM_SET_GSI_ROUTING: {
  2557. struct kvm_irq_routing routing;
  2558. struct kvm_irq_routing __user *urouting;
  2559. struct kvm_irq_routing_entry *entries = NULL;
  2560. r = -EFAULT;
  2561. if (copy_from_user(&routing, argp, sizeof(routing)))
  2562. goto out;
  2563. r = -EINVAL;
  2564. if (!kvm_arch_can_set_irq_routing(kvm))
  2565. goto out;
  2566. if (routing.nr > KVM_MAX_IRQ_ROUTES)
  2567. goto out;
  2568. if (routing.flags)
  2569. goto out;
  2570. if (routing.nr) {
  2571. r = -ENOMEM;
  2572. entries = vmalloc(routing.nr * sizeof(*entries));
  2573. if (!entries)
  2574. goto out;
  2575. r = -EFAULT;
  2576. urouting = argp;
  2577. if (copy_from_user(entries, urouting->entries,
  2578. routing.nr * sizeof(*entries)))
  2579. goto out_free_irq_routing;
  2580. }
  2581. r = kvm_set_irq_routing(kvm, entries, routing.nr,
  2582. routing.flags);
  2583. out_free_irq_routing:
  2584. vfree(entries);
  2585. break;
  2586. }
  2587. #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
  2588. case KVM_CREATE_DEVICE: {
  2589. struct kvm_create_device cd;
  2590. r = -EFAULT;
  2591. if (copy_from_user(&cd, argp, sizeof(cd)))
  2592. goto out;
  2593. r = kvm_ioctl_create_device(kvm, &cd);
  2594. if (r)
  2595. goto out;
  2596. r = -EFAULT;
  2597. if (copy_to_user(argp, &cd, sizeof(cd)))
  2598. goto out;
  2599. r = 0;
  2600. break;
  2601. }
  2602. case KVM_CHECK_EXTENSION:
  2603. r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
  2604. break;
  2605. default:
  2606. r = kvm_arch_vm_ioctl(filp, ioctl, arg);
  2607. }
  2608. out:
  2609. return r;
  2610. }
  2611. #ifdef CONFIG_KVM_COMPAT
  2612. struct compat_kvm_dirty_log {
  2613. __u32 slot;
  2614. __u32 padding1;
  2615. union {
  2616. compat_uptr_t dirty_bitmap; /* one bit per page */
  2617. __u64 padding2;
  2618. };
  2619. };
  2620. static long kvm_vm_compat_ioctl(struct file *filp,
  2621. unsigned int ioctl, unsigned long arg)
  2622. {
  2623. struct kvm *kvm = filp->private_data;
  2624. int r;
  2625. if (kvm->mm != current->mm)
  2626. return -EIO;
  2627. switch (ioctl) {
  2628. case KVM_GET_DIRTY_LOG: {
  2629. struct compat_kvm_dirty_log compat_log;
  2630. struct kvm_dirty_log log;
  2631. if (copy_from_user(&compat_log, (void __user *)arg,
  2632. sizeof(compat_log)))
  2633. return -EFAULT;
  2634. log.slot = compat_log.slot;
  2635. log.padding1 = compat_log.padding1;
  2636. log.padding2 = compat_log.padding2;
  2637. log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
  2638. r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
  2639. break;
  2640. }
  2641. default:
  2642. r = kvm_vm_ioctl(filp, ioctl, arg);
  2643. }
  2644. return r;
  2645. }
  2646. #endif
  2647. static struct file_operations kvm_vm_fops = {
  2648. .release = kvm_vm_release,
  2649. .unlocked_ioctl = kvm_vm_ioctl,
  2650. #ifdef CONFIG_KVM_COMPAT
  2651. .compat_ioctl = kvm_vm_compat_ioctl,
  2652. #endif
  2653. .llseek = noop_llseek,
  2654. };
  2655. static int kvm_dev_ioctl_create_vm(unsigned long type)
  2656. {
  2657. int r;
  2658. struct kvm *kvm;
  2659. struct file *file;
  2660. kvm = kvm_create_vm(type);
  2661. if (IS_ERR(kvm))
  2662. return PTR_ERR(kvm);
  2663. #ifdef CONFIG_KVM_MMIO
  2664. r = kvm_coalesced_mmio_init(kvm);
  2665. if (r < 0) {
  2666. kvm_put_kvm(kvm);
  2667. return r;
  2668. }
  2669. #endif
  2670. r = get_unused_fd_flags(O_CLOEXEC);
  2671. if (r < 0) {
  2672. kvm_put_kvm(kvm);
  2673. return r;
  2674. }
  2675. file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
  2676. if (IS_ERR(file)) {
  2677. put_unused_fd(r);
  2678. kvm_put_kvm(kvm);
  2679. return PTR_ERR(file);
  2680. }
  2681. /*
  2682. * Don't call kvm_put_kvm anymore at this point; file->f_op is
  2683. * already set, with ->release() being kvm_vm_release(). In error
  2684. * cases it will be called by the final fput(file) and will take
  2685. * care of doing kvm_put_kvm(kvm).
  2686. */
  2687. if (kvm_create_vm_debugfs(kvm, r) < 0) {
  2688. put_unused_fd(r);
  2689. fput(file);
  2690. return -ENOMEM;
  2691. }
  2692. kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
  2693. fd_install(r, file);
  2694. return r;
  2695. }
  2696. static long kvm_dev_ioctl(struct file *filp,
  2697. unsigned int ioctl, unsigned long arg)
  2698. {
  2699. long r = -EINVAL;
  2700. switch (ioctl) {
  2701. case KVM_GET_API_VERSION:
  2702. if (arg)
  2703. goto out;
  2704. r = KVM_API_VERSION;
  2705. break;
  2706. case KVM_CREATE_VM:
  2707. r = kvm_dev_ioctl_create_vm(arg);
  2708. break;
  2709. case KVM_CHECK_EXTENSION:
  2710. r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
  2711. break;
  2712. case KVM_GET_VCPU_MMAP_SIZE:
  2713. if (arg)
  2714. goto out;
  2715. r = PAGE_SIZE; /* struct kvm_run */
  2716. #ifdef CONFIG_X86
  2717. r += PAGE_SIZE; /* pio data page */
  2718. #endif
  2719. #ifdef CONFIG_KVM_MMIO
  2720. r += PAGE_SIZE; /* coalesced mmio ring page */
  2721. #endif
  2722. break;
  2723. case KVM_TRACE_ENABLE:
  2724. case KVM_TRACE_PAUSE:
  2725. case KVM_TRACE_DISABLE:
  2726. r = -EOPNOTSUPP;
  2727. break;
  2728. default:
  2729. return kvm_arch_dev_ioctl(filp, ioctl, arg);
  2730. }
  2731. out:
  2732. return r;
  2733. }
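/*
 * Illustrative userspace sketch (annotation, not in the original source)
 * of the character-device ioctls handled above; variable names are
 * assumptions and error handling is omitted:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *	int api = ioctl(kvm_fd, KVM_GET_API_VERSION, 0);	// expect KVM_API_VERSION
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);		// type 0: default VM
 *	long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	// vm_fd then accepts the VM ioctls (KVM_CREATE_VCPU, ...) handled
 *	// by kvm_vm_ioctl() earlier in this file.
 */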
  2734. static struct file_operations kvm_chardev_ops = {
  2735. .unlocked_ioctl = kvm_dev_ioctl,
  2736. .compat_ioctl = kvm_dev_ioctl,
  2737. .llseek = noop_llseek,
  2738. };
static struct miscdevice kvm_dev = {
	.minor = KVM_MINOR,
	.name = "kvm",
	.fops = &kvm_chardev_ops,
};
  2744. static void hardware_enable_nolock(void *junk)
  2745. {
  2746. int cpu = raw_smp_processor_id();
  2747. int r;
  2748. if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
  2749. return;
  2750. cpumask_set_cpu(cpu, cpus_hardware_enabled);
  2751. r = kvm_arch_hardware_enable();
  2752. if (r) {
  2753. cpumask_clear_cpu(cpu, cpus_hardware_enabled);
  2754. atomic_inc(&hardware_enable_failed);
  2755. pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
  2756. }
  2757. }
  2758. static int kvm_starting_cpu(unsigned int cpu)
  2759. {
  2760. raw_spin_lock(&kvm_count_lock);
  2761. if (kvm_usage_count)
  2762. hardware_enable_nolock(NULL);
  2763. raw_spin_unlock(&kvm_count_lock);
  2764. return 0;
  2765. }
  2766. static void hardware_disable_nolock(void *junk)
  2767. {
  2768. int cpu = raw_smp_processor_id();
  2769. if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
  2770. return;
  2771. cpumask_clear_cpu(cpu, cpus_hardware_enabled);
  2772. kvm_arch_hardware_disable();
  2773. }
  2774. static int kvm_dying_cpu(unsigned int cpu)
  2775. {
  2776. raw_spin_lock(&kvm_count_lock);
  2777. if (kvm_usage_count)
  2778. hardware_disable_nolock(NULL);
  2779. raw_spin_unlock(&kvm_count_lock);
  2780. return 0;
  2781. }
  2782. static void hardware_disable_all_nolock(void)
  2783. {
  2784. BUG_ON(!kvm_usage_count);
  2785. kvm_usage_count--;
  2786. if (!kvm_usage_count)
  2787. on_each_cpu(hardware_disable_nolock, NULL, 1);
  2788. }
  2789. static void hardware_disable_all(void)
  2790. {
  2791. raw_spin_lock(&kvm_count_lock);
  2792. hardware_disable_all_nolock();
  2793. raw_spin_unlock(&kvm_count_lock);
  2794. }
  2795. static int hardware_enable_all(void)
  2796. {
  2797. int r = 0;
  2798. raw_spin_lock(&kvm_count_lock);
  2799. kvm_usage_count++;
  2800. if (kvm_usage_count == 1) {
  2801. atomic_set(&hardware_enable_failed, 0);
  2802. on_each_cpu(hardware_enable_nolock, NULL, 1);
  2803. if (atomic_read(&hardware_enable_failed)) {
  2804. hardware_disable_all_nolock();
  2805. r = -EBUSY;
  2806. }
  2807. }
  2808. raw_spin_unlock(&kvm_count_lock);
  2809. return r;
  2810. }
  2811. static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
  2812. void *v)
  2813. {
/*
 * Some BIOSes (well, at least the author's) hang on reboot if the CPU
 * is still in VMX root mode.
 *
 * Intel TXT also requires VMX to be off on all CPUs when the system
 * shuts down.
 */
  2820. pr_info("kvm: exiting hardware virtualization\n");
  2821. kvm_rebooting = true;
  2822. on_each_cpu(hardware_disable_nolock, NULL, 1);
  2823. return NOTIFY_OK;
  2824. }
  2825. static struct notifier_block kvm_reboot_notifier = {
  2826. .notifier_call = kvm_reboot,
  2827. .priority = 0,
  2828. };
  2829. static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
  2830. {
  2831. int i;
  2832. for (i = 0; i < bus->dev_count; i++) {
  2833. struct kvm_io_device *pos = bus->range[i].dev;
  2834. kvm_iodevice_destructor(pos);
  2835. }
  2836. kfree(bus);
  2837. }
  2838. static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
  2839. const struct kvm_io_range *r2)
  2840. {
  2841. gpa_t addr1 = r1->addr;
  2842. gpa_t addr2 = r2->addr;
  2843. if (addr1 < addr2)
  2844. return -1;
/*
 * If r2->len == 0, match the exact address.  If r2->len != 0,
 * accept any overlapping write.  Any order is acceptable for
 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
 * we process all of them.
 */
  2850. if (r2->len) {
  2851. addr1 += r1->len;
  2852. addr2 += r2->len;
  2853. }
  2854. if (addr1 > addr2)
  2855. return 1;
  2856. return 0;
  2857. }
  2858. static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
  2859. {
  2860. return kvm_io_bus_cmp(p1, p2);
  2861. }
  2862. static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
  2863. gpa_t addr, int len)
  2864. {
  2865. bus->range[bus->dev_count++] = (struct kvm_io_range) {
  2866. .addr = addr,
  2867. .len = len,
  2868. .dev = dev,
  2869. };
  2870. sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
  2871. kvm_io_bus_sort_cmp, NULL);
  2872. return 0;
  2873. }
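/*
 * Overlapping ranges compare equal under kvm_io_bus_cmp(), so bsearch() may
 * land on any matching entry; walk backwards to the first one so callers can
 * iterate over every device that covers the access.
 */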
static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
				    gpa_t addr, int len)
{
	struct kvm_io_range *range, key;
	int off;

	key = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	range = bsearch(&key, bus->range, bus->dev_count,
			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
	if (range == NULL)
		return -ENOENT;

	off = range - bus->range;
	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
		off--;

	return off;
}

static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			      struct kvm_io_range *range, const void *val)
{
	int idx;

	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
					range->len, val))
			return idx;
		idx++;
	}

	return -EOPNOTSUPP;
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;
	int r;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	if (!bus)
		return -ENOMEM;
	r = __kvm_io_bus_write(vcpu, bus, &range, val);
	return r < 0 ? r : 0;
}

/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	if (!bus)
		return -ENOMEM;

	/* First try the device referenced by cookie. */
	if ((cookie >= 0) && (cookie < bus->dev_count) &&
	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
					val))
			return cookie;

	/*
	 * cookie contained garbage; fall back to search and return the
	 * correct cookie value.
	 */
	return __kvm_io_bus_write(vcpu, bus, &range, val);
}

static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			     struct kvm_io_range *range, void *val)
{
	int idx;

	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
				       range->len, val))
			return idx;
		idx++;
	}

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_write);

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;
	int r;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	if (!bus)
		return -ENOMEM;
	r = __kvm_io_bus_read(vcpu, bus, &range, val);
	return r < 0 ? r : 0;
}

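/*
 * Bus updates never modify a kvm_io_bus in place: register/unregister below
 * allocate a copy, splice the device in or out, publish the new bus with
 * rcu_assign_pointer() and free the old one after synchronize_srcu_expedited(),
 * so SRCU readers of kvm->buses[] always see a consistent array.
 */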
/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm_get_bus(kvm, bus_idx);
	if (!bus)
		return -ENOMEM;

	/* exclude ioeventfd which is limited by maximum fd */
	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
		return -ENOSPC;

	new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) *
			  sizeof(struct kvm_io_range)), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
	       sizeof(struct kvm_io_range)));
	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}

/* Caller must hold slots_lock. */
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			       struct kvm_io_device *dev)
{
	int i;
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm_get_bus(kvm, bus_idx);
	if (!bus)
		return;

	for (i = 0; i < bus->dev_count; i++)
		if (bus->range[i].dev == dev) {
			break;
		}

	if (i == bus->dev_count)
		return;

	new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
			  sizeof(struct kvm_io_range)), GFP_KERNEL);
	if (!new_bus) {
		pr_err("kvm: failed to shrink bus, removing it completely\n");
		goto broken;
	}

	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
	new_bus->dev_count--;
	memcpy(new_bus->range + i, bus->range + i + 1,
	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));

broken:
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return;
}

struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr)
{
	struct kvm_io_bus *bus;
	int dev_idx, srcu_idx;
	struct kvm_io_device *iodev = NULL;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	if (!bus)
		goto out_unlock;

	dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
	if (dev_idx < 0)
		goto out_unlock;

	iodev = bus->range[dev_idx].dev;

out_unlock:
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return iodev;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);

static int kvm_debugfs_open(struct inode *inode, struct file *file,
			    int (*get)(void *, u64 *), int (*set)(void *, u64),
			    const char *fmt)
{
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
					  inode->i_private;

	/* The debugfs files are a reference to the kvm struct which
	 * is still valid when kvm_destroy_vm is called.
	 * To avoid the race between open and the removal of the debugfs
	 * directory we test against the users count.
	 */
	if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
		return -ENOENT;

	if (simple_attr_open(inode, file, get, set, fmt)) {
		kvm_put_kvm(stat_data->kvm);
		return -ENOMEM;
	}

	return 0;
}

static int kvm_debugfs_release(struct inode *inode, struct file *file)
{
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
					  inode->i_private;

	simple_attr_release(inode, file);
	kvm_put_kvm(stat_data->kvm);

	return 0;
}

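/*
 * Per-VM statistics: each debugfs file carries a kvm_stat_data with the VM
 * and the byte offset of the counter inside struct kvm (or, for the vcpu
 * variants below, inside struct kvm_vcpu).  Reading returns the value,
 * writing 0 clears it.
 */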
static int vm_stat_get_per_vm(void *data, u64 *val)
{
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;

	*val = *(ulong *)((void *)stat_data->kvm + stat_data->offset);

	return 0;
}

static int vm_stat_clear_per_vm(void *data, u64 val)
{
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;

	if (val)
		return -EINVAL;

	*(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0;

	return 0;
}

static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
{
	__simple_attr_check_format("%llu\n", 0ull);
	return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
				vm_stat_clear_per_vm, "%llu\n");
}

static const struct file_operations vm_stat_get_per_vm_fops = {
	.owner   = THIS_MODULE,
	.open    = vm_stat_get_per_vm_open,
	.release = kvm_debugfs_release,
	.read    = simple_attr_read,
	.write   = simple_attr_write,
	.llseek  = no_llseek,
};

static int vcpu_stat_get_per_vm(void *data, u64 *val)
{
	int i;
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
	struct kvm_vcpu *vcpu;

	*val = 0;

	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
		*val += *(u64 *)((void *)vcpu + stat_data->offset);

	return 0;
}

static int vcpu_stat_clear_per_vm(void *data, u64 val)
{
	int i;
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
	struct kvm_vcpu *vcpu;

	if (val)
		return -EINVAL;

	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
		*(u64 *)((void *)vcpu + stat_data->offset) = 0;

	return 0;
}

static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
{
	__simple_attr_check_format("%llu\n", 0ull);
	return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
				vcpu_stat_clear_per_vm, "%llu\n");
}

static const struct file_operations vcpu_stat_get_per_vm_fops = {
	.owner   = THIS_MODULE,
	.open    = vcpu_stat_get_per_vm_open,
	.release = kvm_debugfs_release,
	.read    = simple_attr_read,
	.write   = simple_attr_write,
	.llseek  = no_llseek,
};

static const struct file_operations *stat_fops_per_vm[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
	[KVM_STAT_VM]   = &vm_stat_get_per_vm_fops,
};

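/*
 * Global statistics under debugfs "kvm": sum the corresponding per-VM value
 * over every VM on vm_list, holding kvm_lock while walking the list.
 */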
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_stat_data stat_tmp = {.offset = offset};
	u64 tmp_val;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		stat_tmp.kvm = kvm;
		vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
		*val += tmp_val;
	}
	spin_unlock(&kvm_lock);
	return 0;
}

static int vm_stat_clear(void *_offset, u64 val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_stat_data stat_tmp = {.offset = offset};

	if (val)
		return -EINVAL;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		stat_tmp.kvm = kvm;
		vm_stat_clear_per_vm((void *)&stat_tmp, 0);
	}
	spin_unlock(&kvm_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_stat_data stat_tmp = {.offset = offset};
	u64 tmp_val;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		stat_tmp.kvm = kvm;
		vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
		*val += tmp_val;
	}
	spin_unlock(&kvm_lock);
	return 0;
}

static int vcpu_stat_clear(void *_offset, u64 val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_stat_data stat_tmp = {.offset = offset};

	if (val)
		return -EINVAL;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		stat_tmp.kvm = kvm;
		vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
	}
	spin_unlock(&kvm_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
			"%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

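/*
 * Emit a KOBJ_CHANGE uevent on the kvm device whenever a VM is created or
 * destroyed, carrying the CREATED/COUNT/EVENT/PID variables and, when
 * available, the VM's debugfs STATS_PATH.
 */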
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
{
	struct kobj_uevent_env *env;
	unsigned long long created, active;

	if (!kvm_dev.this_device || !kvm)
		return;

	spin_lock(&kvm_lock);
	if (type == KVM_EVENT_CREATE_VM) {
		kvm_createvm_count++;
		kvm_active_vms++;
	} else if (type == KVM_EVENT_DESTROY_VM) {
		kvm_active_vms--;
	}
	created = kvm_createvm_count;
	active = kvm_active_vms;
	spin_unlock(&kvm_lock);

	env = kzalloc(sizeof(*env), GFP_KERNEL);
	if (!env)
		return;

	add_uevent_var(env, "CREATED=%llu", created);
	add_uevent_var(env, "COUNT=%llu", active);

	if (type == KVM_EVENT_CREATE_VM) {
		add_uevent_var(env, "EVENT=create");
		kvm->userspace_pid = task_pid_nr(current);
	} else if (type == KVM_EVENT_DESTROY_VM) {
		add_uevent_var(env, "EVENT=destroy");
	}
	add_uevent_var(env, "PID=%d", kvm->userspace_pid);

	if (kvm->debugfs_dentry) {
		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);

		if (p) {
			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
			if (!IS_ERR(tmp))
				add_uevent_var(env, "STATS_PATH=%s", tmp);
			kfree(p);
		}
	}
	/* no need for checks, since we are adding at most only 5 keys */
	env->envp[env->envp_idx++] = NULL;
	kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
	kfree(env);
}

static int kvm_init_debug(void)
{
	int r = -EEXIST;
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	if (kvm_debugfs_dir == NULL)
		goto out;

	kvm_debugfs_num_entries = 0;
	for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
		if (!debugfs_create_file(p->name, 0644, kvm_debugfs_dir,
					 (void *)(long)p->offset,
					 stat_fops[p->kind]))
			goto out_dir;
	}

	return 0;

out_dir:
	debugfs_remove_recursive(kvm_debugfs_dir);
out:
	return r;
}

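/*
 * Syscore hooks: on system suspend, drop out of hardware virtualization on
 * this CPU if any VMs exist; on resume, re-enable it.  kvm_resume() warns if
 * kvm_count_lock is unexpectedly held at that point.
 */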
static int kvm_suspend(void)
{
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	return 0;
}

static void kvm_resume(void)
{
	if (kvm_usage_count) {
		WARN_ON(raw_spin_is_locked(&kvm_count_lock));
		hardware_enable_nolock(NULL);
	}
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

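/*
 * Preempt notifiers let a vcpu's architecture state follow its task across
 * the scheduler: kvm_sched_out() saves state when the vcpu thread is
 * descheduled (and notes whether it was preempted while still runnable),
 * kvm_sched_in() reloads it on whichever CPU the thread lands on.
 */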
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	if (vcpu->preempted)
		vcpu->preempted = false;

	kvm_arch_sched_in(vcpu, cpu);
	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	if (current->state == TASK_RUNNING)
		vcpu->preempted = true;
	kvm_arch_vcpu_put(vcpu);
}

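/*
 * Module entry point, called from each architecture's init code.  Sets up,
 * in order: arch-specific state, irqfd, the hardware-enable cpumask, arch
 * hardware, a compatibility check on every online CPU, CPU hotplug and
 * reboot hooks, the vcpu slab cache, async page faults, the /dev/kvm misc
 * device, syscore and preempt hooks, debugfs, and the VFIO device ops; the
 * error path unwinds in reverse.
 */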
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	/*
	 * kvm_arch_init makes sure there's at most one caller
	 * for architectures that support multiple implementations,
	 * like intel and amd on x86.
	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
	 * conflicts in case kvm is already setup for another implementation.
	 */
	r = kvm_irqfd_init();
	if (r)
		goto out_irqfd;

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
				      kvm_starting_cpu, kvm_dying_cpu);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
					   SLAB_ACCOUNT, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_3;
	}

	r = kvm_async_pf_init();
	if (r)
		goto out_free;

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		pr_err("kvm: misc device register failed\n");
		goto out_unreg;
	}

	register_syscore_ops(&kvm_syscore_ops);

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	r = kvm_init_debug();
	if (r) {
		pr_err("kvm: create debugfs files failed\n");
		goto out_undebugfs;
	}

	r = kvm_vfio_ops_init();
	WARN_ON(r);

	return 0;

out_undebugfs:
	unregister_syscore_ops(&kvm_syscore_ops);
	misc_deregister(&kvm_dev);
out_unreg:
	kvm_async_pf_deinit();
out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	kvm_irqfd_exit();
out_irqfd:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	debugfs_remove_recursive(kvm_debugfs_dir);
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_async_pf_deinit();
	unregister_syscore_ops(&kvm_syscore_ops);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_irqfd_exit();
	free_cpumask_var(cpus_hardware_enabled);
	kvm_vfio_ops_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);