  1. /*
  2. * linux/mm/memory.c
  3. *
  4. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  5. */
  6. /*
  7. * demand-loading started 01.12.91 - seems it is high on the list of
  8. * things wanted, and it should be easy to implement. - Linus
  9. */
  10. /*
  11. * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
  12. * pages started 02.12.91, seems to work. - Linus.
  13. *
  14. * Tested sharing by executing about 30 /bin/sh: under the old kernel it
  15. * would have taken more than the 6M I have free, but it worked well as
  16. * far as I could see.
  17. *
  18. * Also corrected some "invalidate()"s - I wasn't doing enough of them.
  19. */
  20. /*
  21. * Real VM (paging to/from disk) started 18.12.91. Much more work and
  22. * thought has to go into this. Oh, well..
  23. * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
  24. * Found it. Everything seems to work now.
  25. * 20.12.91 - Ok, making the swap-device changeable like the root.
  26. */
  27. /*
  28. * 05.04.94 - Multi-page memory management added for v1.1.
  29. * Idea by Alex Bligh (alex@cconcepts.co.uk)
  30. *
  31. * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
  32. * (Gerhard.Wichert@pdb.siemens.de)
  33. *
  34. * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
  35. */
  36. #include <linux/kernel_stat.h>
  37. #include <linux/mm.h>
  38. #include <linux/sched/mm.h>
  39. #include <linux/sched/coredump.h>
  40. #include <linux/sched/numa_balancing.h>
  41. #include <linux/sched/task.h>
  42. #include <linux/hugetlb.h>
  43. #include <linux/mman.h>
  44. #include <linux/swap.h>
  45. #include <linux/highmem.h>
  46. #include <linux/pagemap.h>
  47. #include <linux/memremap.h>
  48. #include <linux/ksm.h>
  49. #include <linux/rmap.h>
  50. #include <linux/export.h>
  51. #include <linux/delayacct.h>
  52. #include <linux/init.h>
  53. #include <linux/pfn_t.h>
  54. #include <linux/writeback.h>
  55. #include <linux/memcontrol.h>
  56. #include <linux/mmu_notifier.h>
  57. #include <linux/swapops.h>
  58. #include <linux/elf.h>
  59. #include <linux/gfp.h>
  60. #include <linux/migrate.h>
  61. #include <linux/string.h>
  62. #include <linux/dma-debug.h>
  63. #include <linux/debugfs.h>
  64. #include <linux/userfaultfd_k.h>
  65. #include <linux/dax.h>
  66. #include <linux/oom.h>
  67. #include <asm/io.h>
  68. #include <asm/mmu_context.h>
  69. #include <asm/pgalloc.h>
  70. #include <linux/uaccess.h>
  71. #include <asm/tlb.h>
  72. #include <asm/tlbflush.h>
  73. #include <asm/pgtable.h>
  74. #include "internal.h"
  75. #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
  76. #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
  77. #endif
  78. #ifndef CONFIG_NEED_MULTIPLE_NODES
  79. /* use the per-pgdat data instead for discontigmem - mbligh */
  80. unsigned long max_mapnr;
  81. EXPORT_SYMBOL(max_mapnr);
  82. struct page *mem_map;
  83. EXPORT_SYMBOL(mem_map);
  84. #endif
  85. /*
  86. * A number of key systems in x86 including ioremap() rely on the assumption
  87. * that high_memory defines the upper bound on direct map memory, the end
  88. * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
  89. * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
  90. * and ZONE_HIGHMEM.
  91. */
  92. void *high_memory;
  93. EXPORT_SYMBOL(high_memory);
  94. /*
  95. * Randomize the address space (stacks, mmaps, brk, etc.).
  96. *
  97. * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
  98. * as ancient (libc5 based) binaries can segfault. )
  99. */
  100. int randomize_va_space __read_mostly =
  101. #ifdef CONFIG_COMPAT_BRK
  102. 1;
  103. #else
  104. 2;
  105. #endif
  106. static int __init disable_randmaps(char *s)
  107. {
  108. randomize_va_space = 0;
  109. return 1;
  110. }
  111. __setup("norandmaps", disable_randmaps);
  112. unsigned long zero_pfn __read_mostly;
  113. EXPORT_SYMBOL(zero_pfn);
  114. unsigned long highest_memmap_pfn __read_mostly;
  115. /*
  116. * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
  117. */
  118. static int __init init_zero_pfn(void)
  119. {
  120. zero_pfn = page_to_pfn(ZERO_PAGE(0));
  121. return 0;
  122. }
  123. core_initcall(init_zero_pfn);
  124. #if defined(SPLIT_RSS_COUNTING)
  125. void sync_mm_rss(struct mm_struct *mm)
  126. {
  127. int i;
  128. for (i = 0; i < NR_MM_COUNTERS; i++) {
  129. if (current->rss_stat.count[i]) {
  130. add_mm_counter(mm, i, current->rss_stat.count[i]);
  131. current->rss_stat.count[i] = 0;
  132. }
  133. }
  134. current->rss_stat.events = 0;
  135. }
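/*
 * Fast-path counter update: when @mm is current's mm the delta is cached
 * in current->rss_stat and folded back later by sync_mm_rss(); otherwise
 * fall back to the atomic add_mm_counter().
 */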
  136. static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
  137. {
  138. struct task_struct *task = current;
  139. if (likely(task->mm == mm))
  140. task->rss_stat.count[member] += val;
  141. else
  142. add_mm_counter(mm, member, val);
  143. }
  144. #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
  145. #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
  146. /* sync counter once per 64 page faults */
  147. #define TASK_RSS_EVENTS_THRESH (64)
  148. static void check_sync_rss_stat(struct task_struct *task)
  149. {
  150. if (unlikely(task != current))
  151. return;
  152. if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
  153. sync_mm_rss(task->mm);
  154. }
  155. #else /* SPLIT_RSS_COUNTING */
  156. #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
  157. #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
  158. static void check_sync_rss_stat(struct task_struct *task)
  159. {
  160. }
  161. #endif /* SPLIT_RSS_COUNTING */
  162. #ifdef HAVE_GENERIC_MMU_GATHER
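/*
 * Move tlb->active on to the next page batch, allocating a fresh one (up
 * to MAX_GATHER_BATCH_COUNT) if needed. Returns false when no further
 * batch can be added, in which case the caller must flush via
 * tlb_flush_mmu().
 */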
  163. static bool tlb_next_batch(struct mmu_gather *tlb)
  164. {
  165. struct mmu_gather_batch *batch;
  166. batch = tlb->active;
  167. if (batch->next) {
  168. tlb->active = batch->next;
  169. return true;
  170. }
  171. if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
  172. return false;
  173. batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
  174. if (!batch)
  175. return false;
  176. tlb->batch_count++;
  177. batch->next = NULL;
  178. batch->nr = 0;
  179. batch->max = MAX_GATHER_BATCH;
  180. tlb->active->next = batch;
  181. tlb->active = batch;
  182. return true;
  183. }
  184. void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
  185. unsigned long start, unsigned long end)
  186. {
  187. tlb->mm = mm;
  188. /* Is it from 0 to ~0? */
  189. tlb->fullmm = !(start | (end+1));
  190. tlb->need_flush_all = 0;
  191. tlb->local.next = NULL;
  192. tlb->local.nr = 0;
  193. tlb->local.max = ARRAY_SIZE(tlb->__pages);
  194. tlb->active = &tlb->local;
  195. tlb->batch_count = 0;
  196. #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  197. tlb->batch = NULL;
  198. #endif
  199. tlb->page_size = 0;
  200. __tlb_reset_range(tlb);
  201. }
  202. static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
  203. {
  204. if (!tlb->end)
  205. return;
  206. tlb_flush(tlb);
  207. mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
  208. #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  209. tlb_table_flush(tlb);
  210. #endif
  211. __tlb_reset_range(tlb);
  212. }
  213. static void tlb_flush_mmu_free(struct mmu_gather *tlb)
  214. {
  215. struct mmu_gather_batch *batch;
  216. for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
  217. free_pages_and_swap_cache(batch->pages, batch->nr);
  218. batch->nr = 0;
  219. }
  220. tlb->active = &tlb->local;
  221. }
  222. void tlb_flush_mmu(struct mmu_gather *tlb)
  223. {
  224. tlb_flush_mmu_tlbonly(tlb);
  225. tlb_flush_mmu_free(tlb);
  226. }
  227. /* tlb_finish_mmu
  228. * Called at the end of the shootdown operation to free up any resources
  229. * that were required.
  230. */
  231. void arch_tlb_finish_mmu(struct mmu_gather *tlb,
  232. unsigned long start, unsigned long end, bool force)
  233. {
  234. struct mmu_gather_batch *batch, *next;
  235. if (force)
  236. __tlb_adjust_range(tlb, start, end - start);
  237. tlb_flush_mmu(tlb);
  238. /* keep the page table cache within bounds */
  239. check_pgt_cache();
  240. for (batch = tlb->local.next; batch; batch = next) {
  241. next = batch->next;
  242. free_pages((unsigned long)batch, 0);
  243. }
  244. tlb->local.next = NULL;
  245. }
  246. /* __tlb_remove_page
  247. * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
  248. * handling the additional races in SMP caused by other CPUs caching valid
  249. * mappings in their TLBs. When we run out of page slots we must call
  250. * tlb_flush_mmu().
  251. * Returns true if the caller should flush.
  252. */
  253. bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
  254. {
  255. struct mmu_gather_batch *batch;
  256. VM_BUG_ON(!tlb->end);
  257. VM_WARN_ON(tlb->page_size != page_size);
  258. batch = tlb->active;
  259. /*
  260. * Add the page and check if we are full. If so
  261. * force a flush.
  262. */
  263. batch->pages[batch->nr++] = page;
  264. if (batch->nr == batch->max) {
  265. if (!tlb_next_batch(tlb))
  266. return true;
  267. batch = tlb->active;
  268. }
  269. VM_BUG_ON_PAGE(batch->nr > batch->max, page);
  270. return false;
  271. }
  272. #endif /* HAVE_GENERIC_MMU_GATHER */
  273. #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  274. /*
  275. * See the comment near struct mmu_table_batch.
  276. */
  277. static void tlb_remove_table_smp_sync(void *arg)
  278. {
  279. /* Simply deliver the interrupt */
  280. }
  281. static void tlb_remove_table_one(void *table)
  282. {
  283. /*
  284. * This isn't an RCU grace period and hence the page-tables cannot be
  285. * assumed to be actually RCU-freed.
  286. *
  287. * It is however sufficient for software page-table walkers that rely on
  288. * IRQ disabling. See the comment near struct mmu_table_batch.
  289. */
  290. smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
  291. __tlb_remove_table(table);
  292. }
  293. static void tlb_remove_table_rcu(struct rcu_head *head)
  294. {
  295. struct mmu_table_batch *batch;
  296. int i;
  297. batch = container_of(head, struct mmu_table_batch, rcu);
  298. for (i = 0; i < batch->nr; i++)
  299. __tlb_remove_table(batch->tables[i]);
  300. free_page((unsigned long)batch);
  301. }
  302. void tlb_table_flush(struct mmu_gather *tlb)
  303. {
  304. struct mmu_table_batch **batch = &tlb->batch;
  305. if (*batch) {
  306. call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
  307. *batch = NULL;
  308. }
  309. }
  310. void tlb_remove_table(struct mmu_gather *tlb, void *table)
  311. {
  312. struct mmu_table_batch **batch = &tlb->batch;
  313. /*
  314. * When there are fewer than two users of this mm there cannot be a
  315. * concurrent page-table walk.
  316. */
  317. if (atomic_read(&tlb->mm->mm_users) < 2) {
  318. __tlb_remove_table(table);
  319. return;
  320. }
  321. if (*batch == NULL) {
  322. *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
  323. if (*batch == NULL) {
  324. tlb_remove_table_one(table);
  325. return;
  326. }
  327. (*batch)->nr = 0;
  328. }
  329. (*batch)->tables[(*batch)->nr++] = table;
  330. if ((*batch)->nr == MAX_TABLE_BATCH)
  331. tlb_table_flush(tlb);
  332. }
  333. #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
  334. /**
  335. * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
  336. * @tlb: the mmu_gather structure to initialize
  337. * @mm: the mm_struct of the target address space
  338. * @start: start of the region that will be removed from the page-table
  339. * @end: end of the region that will be removed from the page-table
  340. *
  341. * Called to initialize an (on-stack) mmu_gather structure for page-table
  342. * tear-down from @mm. The @start and @end are set to 0 and -1
  343. * respectively when @mm is without users and we're going to destroy
  344. * the full address space (exit/execve).
  345. */
  346. void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
  347. unsigned long start, unsigned long end)
  348. {
  349. arch_tlb_gather_mmu(tlb, mm, start, end);
  350. inc_tlb_flush_pending(tlb->mm);
  351. }
  352. void tlb_finish_mmu(struct mmu_gather *tlb,
  353. unsigned long start, unsigned long end)
  354. {
  355. /*
  356. * If parallel threads are doing PTE changes on the same range under a
  357. * non-exclusive lock (e.g., mmap_sem held for read) but defer the TLB
  358. * flush by batching, a thread that still has a stale TLB entry may skip
  359. * the flush after observing pte_none or !pte_dirty, so flush the TLB
  360. * forcefully if we detect parallel PTE batching threads.
  361. */
  362. bool force = mm_tlb_flush_nested(tlb->mm);
  363. arch_tlb_finish_mmu(tlb, start, end, force);
  364. dec_tlb_flush_pending(tlb->mm);
  365. }
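/*
 * Typical caller sequence (a sketch, roughly what unmap_region() and
 * exit_mmap() in mm/mmap.c do; argument details elided here):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	unmap_vmas(&tlb, vma, start, end);
 *	free_pgtables(&tlb, vma, floor, ceiling);
 *	tlb_finish_mmu(&tlb, start, end);
 */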
  366. /*
  367. * Note: this doesn't free the actual pages themselves. That
  368. * has been handled earlier when unmapping all the memory regions.
  369. */
  370. static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
  371. unsigned long addr)
  372. {
  373. pgtable_t token = pmd_pgtable(*pmd);
  374. pmd_clear(pmd);
  375. pte_free_tlb(tlb, token, addr);
  376. mm_dec_nr_ptes(tlb->mm);
  377. }
  378. static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
  379. unsigned long addr, unsigned long end,
  380. unsigned long floor, unsigned long ceiling)
  381. {
  382. pmd_t *pmd;
  383. unsigned long next;
  384. unsigned long start;
  385. start = addr;
  386. pmd = pmd_offset(pud, addr);
  387. do {
  388. next = pmd_addr_end(addr, end);
  389. if (pmd_none_or_clear_bad(pmd))
  390. continue;
  391. free_pte_range(tlb, pmd, addr);
  392. } while (pmd++, addr = next, addr != end);
  393. start &= PUD_MASK;
  394. if (start < floor)
  395. return;
  396. if (ceiling) {
  397. ceiling &= PUD_MASK;
  398. if (!ceiling)
  399. return;
  400. }
  401. if (end - 1 > ceiling - 1)
  402. return;
  403. pmd = pmd_offset(pud, start);
  404. pud_clear(pud);
  405. pmd_free_tlb(tlb, pmd, start);
  406. mm_dec_nr_pmds(tlb->mm);
  407. }
  408. static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
  409. unsigned long addr, unsigned long end,
  410. unsigned long floor, unsigned long ceiling)
  411. {
  412. pud_t *pud;
  413. unsigned long next;
  414. unsigned long start;
  415. start = addr;
  416. pud = pud_offset(p4d, addr);
  417. do {
  418. next = pud_addr_end(addr, end);
  419. if (pud_none_or_clear_bad(pud))
  420. continue;
  421. free_pmd_range(tlb, pud, addr, next, floor, ceiling);
  422. } while (pud++, addr = next, addr != end);
  423. start &= P4D_MASK;
  424. if (start < floor)
  425. return;
  426. if (ceiling) {
  427. ceiling &= P4D_MASK;
  428. if (!ceiling)
  429. return;
  430. }
  431. if (end - 1 > ceiling - 1)
  432. return;
  433. pud = pud_offset(p4d, start);
  434. p4d_clear(p4d);
  435. pud_free_tlb(tlb, pud, start);
  436. mm_dec_nr_puds(tlb->mm);
  437. }
  438. static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
  439. unsigned long addr, unsigned long end,
  440. unsigned long floor, unsigned long ceiling)
  441. {
  442. p4d_t *p4d;
  443. unsigned long next;
  444. unsigned long start;
  445. start = addr;
  446. p4d = p4d_offset(pgd, addr);
  447. do {
  448. next = p4d_addr_end(addr, end);
  449. if (p4d_none_or_clear_bad(p4d))
  450. continue;
  451. free_pud_range(tlb, p4d, addr, next, floor, ceiling);
  452. } while (p4d++, addr = next, addr != end);
  453. start &= PGDIR_MASK;
  454. if (start < floor)
  455. return;
  456. if (ceiling) {
  457. ceiling &= PGDIR_MASK;
  458. if (!ceiling)
  459. return;
  460. }
  461. if (end - 1 > ceiling - 1)
  462. return;
  463. p4d = p4d_offset(pgd, start);
  464. pgd_clear(pgd);
  465. p4d_free_tlb(tlb, p4d, start);
  466. }
  467. /*
  468. * This function frees user-level page tables of a process.
  469. */
  470. void free_pgd_range(struct mmu_gather *tlb,
  471. unsigned long addr, unsigned long end,
  472. unsigned long floor, unsigned long ceiling)
  473. {
  474. pgd_t *pgd;
  475. unsigned long next;
  476. /*
  477. * The next few lines have given us lots of grief...
  478. *
  479. * Why are we testing PMD* at this top level? Because often
  480. * there will be no work to do at all, and we'd prefer not to
  481. * go all the way down to the bottom just to discover that.
  482. *
  483. * Why all these "- 1"s? Because 0 represents both the bottom
  484. * of the address space and the top of it (using -1 for the
  485. * top wouldn't help much: the masks would do the wrong thing).
  486. * The rule is that addr 0 and floor 0 refer to the bottom of
  487. * the address space, but end 0 and ceiling 0 refer to the top.
  488. * Comparisons need to use "end - 1" and "ceiling - 1" (though
  489. * that end 0 case should be mythical).
  490. *
  491. * Wherever addr is brought up or ceiling brought down, we must
  492. * be careful to reject "the opposite 0" before it confuses the
  493. * subsequent tests. But what about where end is brought down
  494. * by PMD_SIZE below? no, end can't go down to 0 there.
  495. *
  496. * Whereas we round start (addr) and ceiling down, by different
  497. * masks at different levels, in order to test whether a table
  498. * now has no other vmas using it, so can be freed, we don't
  499. * bother to round floor or end up - the tests don't need that.
  500. */
  501. addr &= PMD_MASK;
  502. if (addr < floor) {
  503. addr += PMD_SIZE;
  504. if (!addr)
  505. return;
  506. }
  507. if (ceiling) {
  508. ceiling &= PMD_MASK;
  509. if (!ceiling)
  510. return;
  511. }
  512. if (end - 1 > ceiling - 1)
  513. end -= PMD_SIZE;
  514. if (addr > end - 1)
  515. return;
  516. /*
  517. * We add page table cache pages with PAGE_SIZE
  518. * (see pte_free_tlb()), so flush the TLB if needed.
  519. */
  520. tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
  521. pgd = pgd_offset(tlb->mm, addr);
  522. do {
  523. next = pgd_addr_end(addr, end);
  524. if (pgd_none_or_clear_bad(pgd))
  525. continue;
  526. free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
  527. } while (pgd++, addr = next, addr != end);
  528. }
  529. void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
  530. unsigned long floor, unsigned long ceiling)
  531. {
  532. while (vma) {
  533. struct vm_area_struct *next = vma->vm_next;
  534. unsigned long addr = vma->vm_start;
  535. /*
  536. * Hide vma from rmap and truncate_pagecache before freeing
  537. * pgtables
  538. */
  539. unlink_anon_vmas(vma);
  540. unlink_file_vma(vma);
  541. if (is_vm_hugetlb_page(vma)) {
  542. hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
  543. floor, next ? next->vm_start : ceiling);
  544. } else {
  545. /*
  546. * Optimization: gather nearby vmas into one call down
  547. */
  548. while (next && next->vm_start <= vma->vm_end + PMD_SIZE
  549. && !is_vm_hugetlb_page(next)) {
  550. vma = next;
  551. next = vma->vm_next;
  552. unlink_anon_vmas(vma);
  553. unlink_file_vma(vma);
  554. }
  555. free_pgd_range(tlb, addr, vma->vm_end,
  556. floor, next ? next->vm_start : ceiling);
  557. }
  558. vma = next;
  559. }
  560. }
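/*
 * Allocate a pte page for @pmd and publish it with pmd_populate() if the
 * pmd is still empty; if another thread populated the pmd first, the
 * freshly allocated page is simply freed again.
 */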
  561. int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
  562. {
  563. spinlock_t *ptl;
  564. pgtable_t new = pte_alloc_one(mm, address);
  565. if (!new)
  566. return -ENOMEM;
  567. /*
  568. * Ensure all pte setup (eg. pte page lock and page clearing) are
  569. * visible before the pte is made visible to other CPUs by being
  570. * put into page tables.
  571. *
  572. * The other side of the story is the pointer chasing in the page
  573. * table walking code (when walking the page table without locking;
  574. * ie. most of the time). Fortunately, these data accesses consist
  575. * of a chain of data-dependent loads, meaning most CPUs (alpha
  576. * being the notable exception) will already guarantee loads are
  577. * seen in-order. See the alpha page table accessors for the
  578. * smp_read_barrier_depends() barriers in page table walking code.
  579. */
  580. smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
  581. ptl = pmd_lock(mm, pmd);
  582. if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
  583. mm_inc_nr_ptes(mm);
  584. pmd_populate(mm, pmd, new);
  585. new = NULL;
  586. }
  587. spin_unlock(ptl);
  588. if (new)
  589. pte_free(mm, new);
  590. return 0;
  591. }
  592. int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
  593. {
  594. pte_t *new = pte_alloc_one_kernel(&init_mm, address);
  595. if (!new)
  596. return -ENOMEM;
  597. smp_wmb(); /* See comment in __pte_alloc */
  598. spin_lock(&init_mm.page_table_lock);
  599. if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
  600. pmd_populate_kernel(&init_mm, pmd, new);
  601. new = NULL;
  602. }
  603. spin_unlock(&init_mm.page_table_lock);
  604. if (new)
  605. pte_free_kernel(&init_mm, new);
  606. return 0;
  607. }
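/*
 * rss updates below are batched in a local NR_MM_COUNTERS-sized vector
 * and folded into the mm in one pass by add_mm_rss_vec(), to avoid
 * touching the shared counters for every pte.
 */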
  608. static inline void init_rss_vec(int *rss)
  609. {
  610. memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
  611. }
  612. static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
  613. {
  614. int i;
  615. if (current->mm == mm)
  616. sync_mm_rss(mm);
  617. for (i = 0; i < NR_MM_COUNTERS; i++)
  618. if (rss[i])
  619. add_mm_counter(mm, i, rss[i]);
  620. }
  621. /*
  622. * This function is called to print an error when a bad pte
  623. * is found. For example, we might have a PFN-mapped pte in
  624. * a region that doesn't allow it.
  625. *
  626. * The calling function must still handle the error.
  627. */
  628. static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
  629. pte_t pte, struct page *page)
  630. {
  631. pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
  632. p4d_t *p4d = p4d_offset(pgd, addr);
  633. pud_t *pud = pud_offset(p4d, addr);
  634. pmd_t *pmd = pmd_offset(pud, addr);
  635. struct address_space *mapping;
  636. pgoff_t index;
  637. static unsigned long resume;
  638. static unsigned long nr_shown;
  639. static unsigned long nr_unshown;
  640. /*
  641. * Allow a burst of 60 reports, then keep quiet for that minute;
  642. * or allow a steady drip of one report per second.
  643. */
  644. if (nr_shown == 60) {
  645. if (time_before(jiffies, resume)) {
  646. nr_unshown++;
  647. return;
  648. }
  649. if (nr_unshown) {
  650. pr_alert("BUG: Bad page map: %lu messages suppressed\n",
  651. nr_unshown);
  652. nr_unshown = 0;
  653. }
  654. nr_shown = 0;
  655. }
  656. if (nr_shown++ == 0)
  657. resume = jiffies + 60 * HZ;
  658. mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
  659. index = linear_page_index(vma, addr);
  660. pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
  661. current->comm,
  662. (long long)pte_val(pte), (long long)pmd_val(*pmd));
  663. if (page)
  664. dump_page(page, "bad pte");
  665. pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
  666. (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
  667. pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
  668. vma->vm_file,
  669. vma->vm_ops ? vma->vm_ops->fault : NULL,
  670. vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
  671. mapping ? mapping->a_ops->readpage : NULL);
  672. dump_stack();
  673. add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
  674. }
  675. /*
  676. * vm_normal_page -- This function gets the "struct page" associated with a pte.
  677. *
  678. * "Special" mappings do not wish to be associated with a "struct page" (either
  679. * it doesn't exist, or it exists but they don't want to touch it). In this
  680. * case, NULL is returned here. "Normal" mappings do have a struct page.
  681. *
  682. * There are 2 broad cases. Firstly, an architecture may define a pte_special()
  683. * pte bit, in which case this function is trivial. Secondly, an architecture
  684. * may not have a spare pte bit, which requires a more complicated scheme,
  685. * described below.
  686. *
  687. * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
  688. * special mapping (even if there are underlying and valid "struct pages").
  689. * COWed pages of a VM_PFNMAP are always normal.
  690. *
  691. * The way we recognize COWed pages within VM_PFNMAP mappings is through the
  692. * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
  693. * set, and the vm_pgoff will point to the first PFN mapped: thus every special
  694. * mapping will always honor the rule
  695. *
  696. * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
  697. *
  698. * And for normal mappings this is false.
  699. *
  700. * This restricts such mappings to be a linear translation from virtual address
  701. * to pfn. To get around this restriction, we allow arbitrary mappings so long
  702. * as the vma is not a COW mapping; in that case, we know that all ptes are
  703. * special (because none can have been COWed).
  704. *
  705. *
  706. * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
  707. *
  708. * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
  709. * page" backing, however the difference is that _all_ pages with a struct
  710. * page (that is, those where pfn_valid is true) are refcounted and considered
  711. * normal pages by the VM. The disadvantage is that pages are refcounted
  712. * (which can be slower and simply not an option for some PFNMAP users). The
  713. * advantage is that we don't have to follow the strict linearity rule of
  714. * PFNMAP mappings in order to support COWable mappings.
  715. *
  716. */
  717. struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
  718. pte_t pte, bool with_public_device)
  719. {
  720. unsigned long pfn = pte_pfn(pte);
  721. if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
  722. if (likely(!pte_special(pte)))
  723. goto check_pfn;
  724. if (vma->vm_ops && vma->vm_ops->find_special_page)
  725. return vma->vm_ops->find_special_page(vma, addr);
  726. if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
  727. return NULL;
  728. if (is_zero_pfn(pfn))
  729. return NULL;
  730. /*
  731. * Device public pages are special pages (they are ZONE_DEVICE
  732. * pages but different from persistent memory). They behave
  733. * almost like normal pages. The difference is that they are
  734. * not on the LRU and thus should never be involved with any-
  735. * thing that involves LRU manipulation (mlock, numa balancing,
  736. * ...).
  737. *
  738. * This is why we still want to return NULL for such pages from
  739. * vm_normal_page() so that we do not have to special case all
  740. * call sites of vm_normal_page().
  741. */
  742. if (likely(pfn <= highest_memmap_pfn)) {
  743. struct page *page = pfn_to_page(pfn);
  744. if (is_device_public_page(page)) {
  745. if (with_public_device)
  746. return page;
  747. return NULL;
  748. }
  749. }
  750. print_bad_pte(vma, addr, pte, NULL);
  751. return NULL;
  752. }
  753. /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
  754. if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
  755. if (vma->vm_flags & VM_MIXEDMAP) {
  756. if (!pfn_valid(pfn))
  757. return NULL;
  758. goto out;
  759. } else {
  760. unsigned long off;
  761. off = (addr - vma->vm_start) >> PAGE_SHIFT;
  762. if (pfn == vma->vm_pgoff + off)
  763. return NULL;
  764. if (!is_cow_mapping(vma->vm_flags))
  765. return NULL;
  766. }
  767. }
  768. if (is_zero_pfn(pfn))
  769. return NULL;
  770. check_pfn:
  771. if (unlikely(pfn > highest_memmap_pfn)) {
  772. print_bad_pte(vma, addr, pte, NULL);
  773. return NULL;
  774. }
  775. /*
  776. * NOTE! We still have PageReserved() pages in the page tables.
  777. * eg. VDSO mappings can cause them to exist.
  778. */
  779. out:
  780. return pfn_to_page(pfn);
  781. }
  782. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  783. struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
  784. pmd_t pmd)
  785. {
  786. unsigned long pfn = pmd_pfn(pmd);
  787. /*
  788. * There is no pmd_special() but there may be special pmds, e.g.
  789. * in a direct-access (dax) mapping, so let's just replicate the
  790. * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
  791. */
  792. if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
  793. if (vma->vm_flags & VM_MIXEDMAP) {
  794. if (!pfn_valid(pfn))
  795. return NULL;
  796. goto out;
  797. } else {
  798. unsigned long off;
  799. off = (addr - vma->vm_start) >> PAGE_SHIFT;
  800. if (pfn == vma->vm_pgoff + off)
  801. return NULL;
  802. if (!is_cow_mapping(vma->vm_flags))
  803. return NULL;
  804. }
  805. }
  806. if (is_zero_pfn(pfn))
  807. return NULL;
  808. if (unlikely(pfn > highest_memmap_pfn))
  809. return NULL;
  810. /*
  811. * NOTE! We still have PageReserved() pages in the page tables.
  812. * eg. VDSO mappings can cause them to exist.
  813. */
  814. out:
  815. return pfn_to_page(pfn);
  816. }
  817. #endif
  818. /*
  819. * Copy one vm_area from one task to the other. Assumes that the page
  820. * tables already present in the new task are cleared over the whole
  821. * range covered by this vma.
  822. */
  823. static inline unsigned long
  824. copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  825. pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
  826. unsigned long addr, int *rss)
  827. {
  828. unsigned long vm_flags = vma->vm_flags;
  829. pte_t pte = *src_pte;
  830. struct page *page;
  831. /* pte contains position in swap or file, so copy. */
  832. if (unlikely(!pte_present(pte))) {
  833. swp_entry_t entry = pte_to_swp_entry(pte);
  834. if (likely(!non_swap_entry(entry))) {
  835. if (swap_duplicate(entry) < 0)
  836. return entry.val;
  837. /* make sure dst_mm is on swapoff's mmlist. */
  838. if (unlikely(list_empty(&dst_mm->mmlist))) {
  839. spin_lock(&mmlist_lock);
  840. if (list_empty(&dst_mm->mmlist))
  841. list_add(&dst_mm->mmlist,
  842. &src_mm->mmlist);
  843. spin_unlock(&mmlist_lock);
  844. }
  845. rss[MM_SWAPENTS]++;
  846. } else if (is_migration_entry(entry)) {
  847. page = migration_entry_to_page(entry);
  848. rss[mm_counter(page)]++;
  849. if (is_write_migration_entry(entry) &&
  850. is_cow_mapping(vm_flags)) {
  851. /*
  852. * COW mappings require pages in both
  853. * parent and child to be set to read.
  854. */
  855. make_migration_entry_read(&entry);
  856. pte = swp_entry_to_pte(entry);
  857. if (pte_swp_soft_dirty(*src_pte))
  858. pte = pte_swp_mksoft_dirty(pte);
  859. set_pte_at(src_mm, addr, src_pte, pte);
  860. }
  861. } else if (is_device_private_entry(entry)) {
  862. page = device_private_entry_to_page(entry);
  863. /*
  864. * Update rss count even for unaddressable pages, as
  865. * they should be treated just like normal pages in this
  866. * respect.
  867. *
  868. * We will likely want to have some new rss counters
  869. * for unaddressable pages, at some point. But for now
  870. * keep things as they are.
  871. */
  872. get_page(page);
  873. rss[mm_counter(page)]++;
  874. page_dup_rmap(page, false);
  875. /*
  876. * We do not preserve soft-dirty information, because so
  877. * far, checkpoint/restore is the only feature that
  878. * requires that. And checkpoint/restore does not work
  879. * when a device driver is involved (you cannot easily
  880. * save and restore device driver state).
  881. */
  882. if (is_write_device_private_entry(entry) &&
  883. is_cow_mapping(vm_flags)) {
  884. make_device_private_entry_read(&entry);
  885. pte = swp_entry_to_pte(entry);
  886. set_pte_at(src_mm, addr, src_pte, pte);
  887. }
  888. }
  889. goto out_set_pte;
  890. }
  891. /*
  892. * If it's a COW mapping, write protect it both
  893. * in the parent and the child
  894. */
  895. if (is_cow_mapping(vm_flags)) {
  896. ptep_set_wrprotect(src_mm, addr, src_pte);
  897. pte = pte_wrprotect(pte);
  898. }
  899. /*
  900. * If it's a shared mapping, mark it clean in
  901. * the child
  902. */
  903. if (vm_flags & VM_SHARED)
  904. pte = pte_mkclean(pte);
  905. pte = pte_mkold(pte);
  906. page = vm_normal_page(vma, addr, pte);
  907. if (page) {
  908. get_page(page);
  909. page_dup_rmap(page, false);
  910. rss[mm_counter(page)]++;
  911. } else if (pte_devmap(pte)) {
  912. page = pte_page(pte);
  913. /*
  914. * Cache-coherent device memory behaves like a regular page and
  915. * not like a persistent memory page. For more information see
  916. * MEMORY_DEVICE_CACHE_COHERENT in memory_hotplug.h
  917. */
  918. if (is_device_public_page(page)) {
  919. get_page(page);
  920. page_dup_rmap(page, false);
  921. rss[mm_counter(page)]++;
  922. }
  923. }
  924. out_set_pte:
  925. set_pte_at(dst_mm, addr, dst_pte, pte);
  926. return 0;
  927. }
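/*
 * Copy one pte page worth of entries from @src_mm to @dst_mm, dropping
 * both pte locks every few entries to keep latencies bounded. When
 * copy_one_pte() hits a swap entry that needs a count continuation we
 * unlock, add the continuation with GFP_KERNEL, and restart at "again".
 */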
  928. static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  929. pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
  930. unsigned long addr, unsigned long end)
  931. {
  932. pte_t *orig_src_pte, *orig_dst_pte;
  933. pte_t *src_pte, *dst_pte;
  934. spinlock_t *src_ptl, *dst_ptl;
  935. int progress = 0;
  936. int rss[NR_MM_COUNTERS];
  937. swp_entry_t entry = (swp_entry_t){0};
  938. again:
  939. init_rss_vec(rss);
  940. dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
  941. if (!dst_pte)
  942. return -ENOMEM;
  943. src_pte = pte_offset_map(src_pmd, addr);
  944. src_ptl = pte_lockptr(src_mm, src_pmd);
  945. spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
  946. orig_src_pte = src_pte;
  947. orig_dst_pte = dst_pte;
  948. arch_enter_lazy_mmu_mode();
  949. do {
  950. /*
  951. * We are holding two locks at this point - either of them
  952. * could generate latencies in another task on another CPU.
  953. */
  954. if (progress >= 32) {
  955. progress = 0;
  956. if (need_resched() ||
  957. spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
  958. break;
  959. }
  960. if (pte_none(*src_pte)) {
  961. progress++;
  962. continue;
  963. }
  964. entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
  965. vma, addr, rss);
  966. if (entry.val)
  967. break;
  968. progress += 8;
  969. } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
  970. arch_leave_lazy_mmu_mode();
  971. spin_unlock(src_ptl);
  972. pte_unmap(orig_src_pte);
  973. add_mm_rss_vec(dst_mm, rss);
  974. pte_unmap_unlock(orig_dst_pte, dst_ptl);
  975. cond_resched();
  976. if (entry.val) {
  977. if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
  978. return -ENOMEM;
  979. progress = 0;
  980. }
  981. if (addr != end)
  982. goto again;
  983. return 0;
  984. }
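/*
 * Walk the pmd level: huge, swap and devmap pmds are handled by
 * copy_huge_pmd(), everything else descends into copy_pte_range().
 */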
  985. static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  986. pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
  987. unsigned long addr, unsigned long end)
  988. {
  989. pmd_t *src_pmd, *dst_pmd;
  990. unsigned long next;
  991. dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
  992. if (!dst_pmd)
  993. return -ENOMEM;
  994. src_pmd = pmd_offset(src_pud, addr);
  995. do {
  996. next = pmd_addr_end(addr, end);
  997. if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
  998. || pmd_devmap(*src_pmd)) {
  999. int err;
  1000. VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
  1001. err = copy_huge_pmd(dst_mm, src_mm,
  1002. dst_pmd, src_pmd, addr, vma);
  1003. if (err == -ENOMEM)
  1004. return -ENOMEM;
  1005. if (!err)
  1006. continue;
  1007. /* fall through */
  1008. }
  1009. if (pmd_none_or_clear_bad(src_pmd))
  1010. continue;
  1011. if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
  1012. vma, addr, next))
  1013. return -ENOMEM;
  1014. } while (dst_pmd++, src_pmd++, addr = next, addr != end);
  1015. return 0;
  1016. }
  1017. static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  1018. p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
  1019. unsigned long addr, unsigned long end)
  1020. {
  1021. pud_t *src_pud, *dst_pud;
  1022. unsigned long next;
  1023. dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
  1024. if (!dst_pud)
  1025. return -ENOMEM;
  1026. src_pud = pud_offset(src_p4d, addr);
  1027. do {
  1028. next = pud_addr_end(addr, end);
  1029. if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
  1030. int err;
  1031. VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
  1032. err = copy_huge_pud(dst_mm, src_mm,
  1033. dst_pud, src_pud, addr, vma);
  1034. if (err == -ENOMEM)
  1035. return -ENOMEM;
  1036. if (!err)
  1037. continue;
  1038. /* fall through */
  1039. }
  1040. if (pud_none_or_clear_bad(src_pud))
  1041. continue;
  1042. if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
  1043. vma, addr, next))
  1044. return -ENOMEM;
  1045. } while (dst_pud++, src_pud++, addr = next, addr != end);
  1046. return 0;
  1047. }
  1048. static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  1049. pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
  1050. unsigned long addr, unsigned long end)
  1051. {
  1052. p4d_t *src_p4d, *dst_p4d;
  1053. unsigned long next;
  1054. dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
  1055. if (!dst_p4d)
  1056. return -ENOMEM;
  1057. src_p4d = p4d_offset(src_pgd, addr);
  1058. do {
  1059. next = p4d_addr_end(addr, end);
  1060. if (p4d_none_or_clear_bad(src_p4d))
  1061. continue;
  1062. if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
  1063. vma, addr, next))
  1064. return -ENOMEM;
  1065. } while (dst_p4d++, src_p4d++, addr = next, addr != end);
  1066. return 0;
  1067. }
  1068. int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  1069. struct vm_area_struct *vma)
  1070. {
  1071. pgd_t *src_pgd, *dst_pgd;
  1072. unsigned long next;
  1073. unsigned long addr = vma->vm_start;
  1074. unsigned long end = vma->vm_end;
  1075. unsigned long mmun_start; /* For mmu_notifiers */
  1076. unsigned long mmun_end; /* For mmu_notifiers */
  1077. bool is_cow;
  1078. int ret;
  1079. /*
  1080. * Don't copy ptes where a page fault will fill them correctly.
  1081. * Fork becomes much lighter when there are big shared or private
  1082. * readonly mappings. The tradeoff is that copy_page_range is more
  1083. * efficient than faulting.
  1084. */
  1085. if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
  1086. !vma->anon_vma)
  1087. return 0;
  1088. if (is_vm_hugetlb_page(vma))
  1089. return copy_hugetlb_page_range(dst_mm, src_mm, vma);
  1090. if (unlikely(vma->vm_flags & VM_PFNMAP)) {
  1091. /*
  1092. * We do not free on the error cases below, as remove_vma()
  1093. * gets called on error from the higher-level routine.
  1094. */
  1095. ret = track_pfn_copy(vma);
  1096. if (ret)
  1097. return ret;
  1098. }
  1099. /*
  1100. * We need to invalidate the secondary MMU mappings only when
  1101. * there could be a permission downgrade on the ptes of the
  1102. * parent mm. And a permission downgrade will only happen if
  1103. * is_cow_mapping() returns true.
  1104. */
  1105. is_cow = is_cow_mapping(vma->vm_flags);
  1106. mmun_start = addr;
  1107. mmun_end = end;
  1108. if (is_cow)
  1109. mmu_notifier_invalidate_range_start(src_mm, mmun_start,
  1110. mmun_end);
  1111. ret = 0;
  1112. dst_pgd = pgd_offset(dst_mm, addr);
  1113. src_pgd = pgd_offset(src_mm, addr);
  1114. do {
  1115. next = pgd_addr_end(addr, end);
  1116. if (pgd_none_or_clear_bad(src_pgd))
  1117. continue;
  1118. if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
  1119. vma, addr, next))) {
  1120. ret = -ENOMEM;
  1121. break;
  1122. }
  1123. } while (dst_pgd++, src_pgd++, addr = next, addr != end);
  1124. if (is_cow)
  1125. mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);
  1126. return ret;
  1127. }
  1128. static unsigned long zap_pte_range(struct mmu_gather *tlb,
  1129. struct vm_area_struct *vma, pmd_t *pmd,
  1130. unsigned long addr, unsigned long end,
  1131. struct zap_details *details)
  1132. {
  1133. struct mm_struct *mm = tlb->mm;
  1134. int force_flush = 0;
  1135. int rss[NR_MM_COUNTERS];
  1136. spinlock_t *ptl;
  1137. pte_t *start_pte;
  1138. pte_t *pte;
  1139. swp_entry_t entry;
  1140. tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
  1141. again:
  1142. init_rss_vec(rss);
  1143. start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
  1144. pte = start_pte;
  1145. flush_tlb_batched_pending(mm);
  1146. arch_enter_lazy_mmu_mode();
  1147. do {
  1148. pte_t ptent = *pte;
  1149. if (pte_none(ptent))
  1150. continue;
  1151. if (pte_present(ptent)) {
  1152. struct page *page;
  1153. page = _vm_normal_page(vma, addr, ptent, true);
  1154. if (unlikely(details) && page) {
  1155. /*
  1156. * unmap_shared_mapping_pages() wants to
  1157. * invalidate cache without truncating:
  1158. * unmap shared but keep private pages.
  1159. */
  1160. if (details->check_mapping &&
  1161. details->check_mapping != page_rmapping(page))
  1162. continue;
  1163. }
  1164. ptent = ptep_get_and_clear_full(mm, addr, pte,
  1165. tlb->fullmm);
  1166. tlb_remove_tlb_entry(tlb, pte, addr);
  1167. if (unlikely(!page))
  1168. continue;
  1169. if (!PageAnon(page)) {
  1170. if (pte_dirty(ptent)) {
  1171. force_flush = 1;
  1172. set_page_dirty(page);
  1173. }
  1174. if (pte_young(ptent) &&
  1175. likely(!(vma->vm_flags & VM_SEQ_READ)))
  1176. mark_page_accessed(page);
  1177. }
  1178. rss[mm_counter(page)]--;
  1179. page_remove_rmap(page, false);
  1180. if (unlikely(page_mapcount(page) < 0))
  1181. print_bad_pte(vma, addr, ptent, page);
  1182. if (unlikely(__tlb_remove_page(tlb, page))) {
  1183. force_flush = 1;
  1184. addr += PAGE_SIZE;
  1185. break;
  1186. }
  1187. continue;
  1188. }
  1189. entry = pte_to_swp_entry(ptent);
  1190. if (non_swap_entry(entry) && is_device_private_entry(entry)) {
  1191. struct page *page = device_private_entry_to_page(entry);
  1192. if (unlikely(details && details->check_mapping)) {
  1193. /*
  1194. * unmap_shared_mapping_pages() wants to
  1195. * invalidate cache without truncating:
  1196. * unmap shared but keep private pages.
  1197. */
  1198. if (details->check_mapping !=
  1199. page_rmapping(page))
  1200. continue;
  1201. }
  1202. pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
  1203. rss[mm_counter(page)]--;
  1204. page_remove_rmap(page, false);
  1205. put_page(page);
  1206. continue;
  1207. }
  1208. /* If details->check_mapping, we leave swap entries. */
  1209. if (unlikely(details))
  1210. continue;
  1211. entry = pte_to_swp_entry(ptent);
  1212. if (!non_swap_entry(entry))
  1213. rss[MM_SWAPENTS]--;
  1214. else if (is_migration_entry(entry)) {
  1215. struct page *page;
  1216. page = migration_entry_to_page(entry);
  1217. rss[mm_counter(page)]--;
  1218. }
  1219. if (unlikely(!free_swap_and_cache(entry)))
  1220. print_bad_pte(vma, addr, ptent, NULL);
  1221. pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
  1222. } while (pte++, addr += PAGE_SIZE, addr != end);
  1223. add_mm_rss_vec(mm, rss);
  1224. arch_leave_lazy_mmu_mode();
  1225. /* Do the actual TLB flush before dropping ptl */
  1226. if (force_flush)
  1227. tlb_flush_mmu_tlbonly(tlb);
  1228. pte_unmap_unlock(start_pte, ptl);
  1229. /*
  1230. * If we forced a TLB flush (either due to running out of
  1231. * batch buffers or because we needed to flush dirty TLB
  1232. * entries before releasing the ptl), free the batched
  1233. * memory too. Restart if we didn't do everything.
  1234. */
  1235. if (force_flush) {
  1236. force_flush = 0;
  1237. tlb_flush_mmu_free(tlb);
  1238. if (addr != end)
  1239. goto again;
  1240. }
  1241. return addr;
  1242. }
  1243. static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
  1244. struct vm_area_struct *vma, pud_t *pud,
  1245. unsigned long addr, unsigned long end,
  1246. struct zap_details *details)
  1247. {
  1248. pmd_t *pmd;
  1249. unsigned long next;
  1250. pmd = pmd_offset(pud, addr);
  1251. do {
  1252. next = pmd_addr_end(addr, end);
  1253. if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
  1254. if (next - addr != HPAGE_PMD_SIZE) {
  1255. VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
  1256. !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
  1257. __split_huge_pmd(vma, pmd, addr, false, NULL);
  1258. } else if (zap_huge_pmd(tlb, vma, pmd, addr))
  1259. goto next;
  1260. /* fall through */
  1261. }
  1262. /*
  1263. * Here there can be other concurrent MADV_DONTNEED or
  1264. * trans huge page faults running, and if the pmd is
  1265. * none or trans huge it can change under us. This is
  1266. * because MADV_DONTNEED holds the mmap_sem in read
  1267. * mode.
  1268. */
  1269. if (pmd_none_or_trans_huge_or_clear_bad(pmd))
  1270. goto next;
  1271. next = zap_pte_range(tlb, vma, pmd, addr, next, details);
  1272. next:
  1273. cond_resched();
  1274. } while (pmd++, addr = next, addr != end);
  1275. return addr;
  1276. }
  1277. static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
  1278. struct vm_area_struct *vma, p4d_t *p4d,
  1279. unsigned long addr, unsigned long end,
  1280. struct zap_details *details)
  1281. {
  1282. pud_t *pud;
  1283. unsigned long next;
  1284. pud = pud_offset(p4d, addr);
  1285. do {
  1286. next = pud_addr_end(addr, end);
  1287. if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
  1288. if (next - addr != HPAGE_PUD_SIZE) {
  1289. VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
  1290. split_huge_pud(vma, pud, addr);
  1291. } else if (zap_huge_pud(tlb, vma, pud, addr))
  1292. goto next;
  1293. /* fall through */
  1294. }
  1295. if (pud_none_or_clear_bad(pud))
  1296. continue;
  1297. next = zap_pmd_range(tlb, vma, pud, addr, next, details);
  1298. next:
  1299. cond_resched();
  1300. } while (pud++, addr = next, addr != end);
  1301. return addr;
  1302. }
  1303. static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
  1304. struct vm_area_struct *vma, pgd_t *pgd,
  1305. unsigned long addr, unsigned long end,
  1306. struct zap_details *details)
  1307. {
  1308. p4d_t *p4d;
  1309. unsigned long next;
  1310. p4d = p4d_offset(pgd, addr);
  1311. do {
  1312. next = p4d_addr_end(addr, end);
  1313. if (p4d_none_or_clear_bad(p4d))
  1314. continue;
  1315. next = zap_pud_range(tlb, vma, p4d, addr, next, details);
  1316. } while (p4d++, addr = next, addr != end);
  1317. return addr;
  1318. }
  1319. void unmap_page_range(struct mmu_gather *tlb,
  1320. struct vm_area_struct *vma,
  1321. unsigned long addr, unsigned long end,
  1322. struct zap_details *details)
  1323. {
  1324. pgd_t *pgd;
  1325. unsigned long next;
  1326. BUG_ON(addr >= end);
  1327. tlb_start_vma(tlb, vma);
  1328. pgd = pgd_offset(vma->vm_mm, addr);
  1329. do {
  1330. next = pgd_addr_end(addr, end);
  1331. if (pgd_none_or_clear_bad(pgd))
  1332. continue;
  1333. next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
  1334. } while (pgd++, addr = next, addr != end);
  1335. tlb_end_vma(tlb, vma);
  1336. }
  1337. static void unmap_single_vma(struct mmu_gather *tlb,
  1338. struct vm_area_struct *vma, unsigned long start_addr,
  1339. unsigned long end_addr,
  1340. struct zap_details *details)
  1341. {
  1342. unsigned long start = max(vma->vm_start, start_addr);
  1343. unsigned long end;
  1344. if (start >= vma->vm_end)
  1345. return;
  1346. end = min(vma->vm_end, end_addr);
  1347. if (end <= vma->vm_start)
  1348. return;
  1349. if (vma->vm_file)
  1350. uprobe_munmap(vma, start, end);
  1351. if (unlikely(vma->vm_flags & VM_PFNMAP))
  1352. untrack_pfn(vma, 0, 0);
  1353. if (start != end) {
  1354. if (unlikely(is_vm_hugetlb_page(vma))) {
  1355. /*
  1356. * It is undesirable to test vma->vm_file as it
  1357. * should be non-null for valid hugetlb area.
  1358. * However, vm_file will be NULL in the error
  1359. * cleanup path of mmap_region. When
  1360. * hugetlbfs ->mmap method fails,
  1361. * mmap_region() nullifies vma->vm_file
  1362. * before calling this function to clean up.
  1363. * Since no pte has actually been setup, it is
  1364. * safe to do nothing in this case.
  1365. */
  1366. if (vma->vm_file) {
  1367. i_mmap_lock_write(vma->vm_file->f_mapping);
  1368. __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
  1369. i_mmap_unlock_write(vma->vm_file->f_mapping);
  1370. }
  1371. } else
  1372. unmap_page_range(tlb, vma, start, end, details);
  1373. }
  1374. }
  1375. /**
  1376. * unmap_vmas - unmap a range of memory covered by a list of vma's
  1377. * @tlb: address of the caller's struct mmu_gather
  1378. * @vma: the starting vma
  1379. * @start_addr: virtual address at which to start unmapping
  1380. * @end_addr: virtual address at which to end unmapping
  1381. *
  1382. * Unmap all pages in the vma list.
  1383. *
1384. * Only addresses between @start_addr and @end_addr will be unmapped.
  1385. *
  1386. * The VMA list must be sorted in ascending virtual address order.
  1387. *
  1388. * unmap_vmas() assumes that the caller will flush the whole unmapped address
  1389. * range after unmap_vmas() returns. So the only responsibility here is to
  1390. * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  1391. * drops the lock and schedules.
  1392. */
  1393. void unmap_vmas(struct mmu_gather *tlb,
  1394. struct vm_area_struct *vma, unsigned long start_addr,
  1395. unsigned long end_addr)
  1396. {
  1397. struct mm_struct *mm = vma->vm_mm;
  1398. mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
  1399. for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
  1400. unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
  1401. mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
  1402. }
  1403. /**
  1404. * zap_page_range - remove user pages in a given range
  1405. * @vma: vm_area_struct holding the applicable pages
  1406. * @start: starting address of pages to zap
  1407. * @size: number of bytes to zap
  1408. *
  1409. * Caller must protect the VMA list
  1410. */
  1411. void zap_page_range(struct vm_area_struct *vma, unsigned long start,
  1412. unsigned long size)
  1413. {
  1414. struct mm_struct *mm = vma->vm_mm;
  1415. struct mmu_gather tlb;
  1416. unsigned long end = start + size;
  1417. lru_add_drain();
  1418. tlb_gather_mmu(&tlb, mm, start, end);
  1419. update_hiwater_rss(mm);
  1420. mmu_notifier_invalidate_range_start(mm, start, end);
  1421. for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
  1422. unmap_single_vma(&tlb, vma, start, end, NULL);
  1423. /*
  1424. * zap_page_range does not specify whether mmap_sem should be
  1425. * held for read or write. That allows parallel zap_page_range
  1426. * operations to unmap a PTE and defer a flush meaning that
  1427. * this call observes pte_none and fails to flush the TLB.
  1428. * Rather than adding a complex API, ensure that no stale
  1429. * TLB entries exist when this call returns.
  1430. */
  1431. flush_tlb_range(vma, start, end);
  1432. }
  1433. mmu_notifier_invalidate_range_end(mm, start, end);
  1434. tlb_finish_mmu(&tlb, start, end);
  1435. }
  1436. /**
  1437. * zap_page_range_single - remove user pages in a given range
  1438. * @vma: vm_area_struct holding the applicable pages
  1439. * @address: starting address of pages to zap
  1440. * @size: number of bytes to zap
  1441. * @details: details of shared cache invalidation
  1442. *
  1443. * The range must fit into one VMA.
  1444. */
  1445. static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
  1446. unsigned long size, struct zap_details *details)
  1447. {
  1448. struct mm_struct *mm = vma->vm_mm;
  1449. struct mmu_gather tlb;
  1450. unsigned long end = address + size;
  1451. lru_add_drain();
  1452. tlb_gather_mmu(&tlb, mm, address, end);
  1453. update_hiwater_rss(mm);
  1454. mmu_notifier_invalidate_range_start(mm, address, end);
  1455. unmap_single_vma(&tlb, vma, address, end, details);
  1456. mmu_notifier_invalidate_range_end(mm, address, end);
  1457. tlb_finish_mmu(&tlb, address, end);
  1458. }
  1459. /**
  1460. * zap_vma_ptes - remove ptes mapping the vma
  1461. * @vma: vm_area_struct holding ptes to be zapped
  1462. * @address: starting address of pages to zap
  1463. * @size: number of bytes to zap
  1464. *
  1465. * This function only unmaps ptes assigned to VM_PFNMAP vmas.
  1466. *
  1467. * The entire address range must be fully contained within the vma.
  1468. *
  1469. * Returns 0 if successful.
  1470. */
  1471. int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
  1472. unsigned long size)
  1473. {
  1474. if (address < vma->vm_start || address + size > vma->vm_end ||
  1475. !(vma->vm_flags & VM_PFNMAP))
  1476. return -1;
  1477. zap_page_range_single(vma, address, size, NULL);
  1478. return 0;
  1479. }
  1480. EXPORT_SYMBOL_GPL(zap_vma_ptes);
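/*
 * Illustrative sketch (not part of the original memory.c): a driver that
 * earlier populated a VM_PFNMAP vma, e.g. with remap_pfn_range(), could use
 * zap_vma_ptes() to drop every pte in that vma again before reprogramming
 * its hardware. The helper name is hypothetical; the vma is assumed to be
 * one the driver itself mapped.
 */
static inline int demo_zap_whole_pfnmap_vma(struct vm_area_struct *vma)
{
	/* zap_vma_ptes() requires the range to lie fully inside a VM_PFNMAP vma */
	return zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}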
  1481. pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
  1482. spinlock_t **ptl)
  1483. {
  1484. pgd_t *pgd;
  1485. p4d_t *p4d;
  1486. pud_t *pud;
  1487. pmd_t *pmd;
  1488. pgd = pgd_offset(mm, addr);
  1489. p4d = p4d_alloc(mm, pgd, addr);
  1490. if (!p4d)
  1491. return NULL;
  1492. pud = pud_alloc(mm, p4d, addr);
  1493. if (!pud)
  1494. return NULL;
  1495. pmd = pmd_alloc(mm, pud, addr);
  1496. if (!pmd)
  1497. return NULL;
  1498. VM_BUG_ON(pmd_trans_huge(*pmd));
  1499. return pte_alloc_map_lock(mm, pmd, addr, ptl);
  1500. }
  1501. /*
  1502. * This is the old fallback for page remapping.
  1503. *
  1504. * For historical reasons, it only allows reserved pages. Only
  1505. * old drivers should use this, and they needed to mark their
  1506. * pages reserved for the old functions anyway.
  1507. */
  1508. static int insert_page(struct vm_area_struct *vma, unsigned long addr,
  1509. struct page *page, pgprot_t prot)
  1510. {
  1511. struct mm_struct *mm = vma->vm_mm;
  1512. int retval;
  1513. pte_t *pte;
  1514. spinlock_t *ptl;
  1515. retval = -EINVAL;
  1516. if (PageAnon(page))
  1517. goto out;
  1518. retval = -ENOMEM;
  1519. flush_dcache_page(page);
  1520. pte = get_locked_pte(mm, addr, &ptl);
  1521. if (!pte)
  1522. goto out;
  1523. retval = -EBUSY;
  1524. if (!pte_none(*pte))
  1525. goto out_unlock;
  1526. /* Ok, finally just insert the thing.. */
  1527. get_page(page);
  1528. inc_mm_counter_fast(mm, mm_counter_file(page));
  1529. page_add_file_rmap(page, false);
  1530. set_pte_at(mm, addr, pte, mk_pte(page, prot));
  1531. retval = 0;
  1532. pte_unmap_unlock(pte, ptl);
  1533. return retval;
  1534. out_unlock:
  1535. pte_unmap_unlock(pte, ptl);
  1536. out:
  1537. return retval;
  1538. }
  1539. /**
  1540. * vm_insert_page - insert single page into user vma
  1541. * @vma: user vma to map to
  1542. * @addr: target user address of this page
  1543. * @page: source kernel page
  1544. *
  1545. * This allows drivers to insert individual pages they've allocated
  1546. * into a user vma.
  1547. *
  1548. * The page has to be a nice clean _individual_ kernel allocation.
  1549. * If you allocate a compound page, you need to have marked it as
  1550. * such (__GFP_COMP), or manually just split the page up yourself
  1551. * (see split_page()).
  1552. *
  1553. * NOTE! Traditionally this was done with "remap_pfn_range()" which
  1554. * took an arbitrary page protection parameter. This doesn't allow
  1555. * that. Your vma protection will have to be set up correctly, which
  1556. * means that if you want a shared writable mapping, you'd better
  1557. * ask for a shared writable mapping!
  1558. *
  1559. * The page does not need to be reserved.
  1560. *
  1561. * Usually this function is called from f_op->mmap() handler
  1562. * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
  1563. * Caller must set VM_MIXEDMAP on vma if it wants to call this
  1564. * function from other places, for example from page-fault handler.
  1565. */
  1566. int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
  1567. struct page *page)
  1568. {
  1569. if (addr < vma->vm_start || addr >= vma->vm_end)
  1570. return -EFAULT;
  1571. if (!page_count(page))
  1572. return -EINVAL;
  1573. if (!(vma->vm_flags & VM_MIXEDMAP)) {
  1574. BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
  1575. BUG_ON(vma->vm_flags & VM_PFNMAP);
  1576. vma->vm_flags |= VM_MIXEDMAP;
  1577. }
  1578. return insert_page(vma, addr, page, vma->vm_page_prot);
  1579. }
  1580. EXPORT_SYMBOL(vm_insert_page);
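/*
 * Illustrative sketch (not part of the original memory.c): the typical caller
 * of vm_insert_page() is an ->mmap handler, running under mmap_sem held for
 * write, that already owns an array of kernel pages and wants them visible in
 * userspace one at a time. The helper name and its pages/npages parameters
 * are hypothetical.
 */
static int demo_insert_pages(struct vm_area_struct *vma, struct page **pages,
			     unsigned long npages)
{
	unsigned long addr = vma->vm_start;
	unsigned long i;
	int err;

	for (i = 0; i < npages && addr < vma->vm_end; i++, addr += PAGE_SIZE) {
		err = vm_insert_page(vma, addr, pages[i]);
		if (err)
			return err;
	}
	return 0;
}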
  1581. static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
  1582. pfn_t pfn, pgprot_t prot, bool mkwrite)
  1583. {
  1584. struct mm_struct *mm = vma->vm_mm;
  1585. int retval;
  1586. pte_t *pte, entry;
  1587. spinlock_t *ptl;
  1588. retval = -ENOMEM;
  1589. pte = get_locked_pte(mm, addr, &ptl);
  1590. if (!pte)
  1591. goto out;
  1592. retval = -EBUSY;
  1593. if (!pte_none(*pte)) {
  1594. if (mkwrite) {
  1595. /*
  1596. * For read faults on private mappings the PFN passed
  1597. * in may not match the PFN we have mapped if the
  1598. * mapped PFN is a writeable COW page. In the mkwrite
  1599. * case we are creating a writable PTE for a shared
  1600. * mapping and we expect the PFNs to match.
  1601. */
  1602. if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn)))
  1603. goto out_unlock;
  1604. entry = *pte;
  1605. goto out_mkwrite;
  1606. } else
  1607. goto out_unlock;
  1608. }
  1609. /* Ok, finally just insert the thing.. */
  1610. if (pfn_t_devmap(pfn))
  1611. entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
  1612. else
  1613. entry = pte_mkspecial(pfn_t_pte(pfn, prot));
  1614. out_mkwrite:
  1615. if (mkwrite) {
  1616. entry = pte_mkyoung(entry);
  1617. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  1618. }
  1619. set_pte_at(mm, addr, pte, entry);
  1620. update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
  1621. retval = 0;
  1622. out_unlock:
  1623. pte_unmap_unlock(pte, ptl);
  1624. out:
  1625. return retval;
  1626. }
  1627. /**
  1628. * vm_insert_pfn - insert single pfn into user vma
  1629. * @vma: user vma to map to
  1630. * @addr: target user address of this page
  1631. * @pfn: source kernel pfn
  1632. *
  1633. * Similar to vm_insert_page, this allows drivers to insert individual pages
  1634. * they've allocated into a user vma. Same comments apply.
  1635. *
  1636. * This function should only be called from a vm_ops->fault handler, and
  1637. * in that case the handler should return NULL.
  1638. *
  1639. * vma cannot be a COW mapping.
  1640. *
  1641. * As this is called only for pages that do not currently exist, we
  1642. * do not need to flush old virtual caches or the TLB.
  1643. */
  1644. int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
  1645. unsigned long pfn)
  1646. {
  1647. return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
  1648. }
  1649. EXPORT_SYMBOL(vm_insert_pfn);
  1650. /**
  1651. * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
  1652. * @vma: user vma to map to
  1653. * @addr: target user address of this page
  1654. * @pfn: source kernel pfn
  1655. * @pgprot: pgprot flags for the inserted page
  1656. *
1657. * This is exactly like vm_insert_pfn, except that it allows drivers
1658. * to override pgprot on a per-page basis.
  1659. *
  1660. * This only makes sense for IO mappings, and it makes no sense for
  1661. * cow mappings. In general, using multiple vmas is preferable;
  1662. * vm_insert_pfn_prot should only be used if using multiple VMAs is
  1663. * impractical.
  1664. */
  1665. int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
  1666. unsigned long pfn, pgprot_t pgprot)
  1667. {
  1668. int ret;
  1669. /*
  1670. * Technically, architectures with pte_special can avoid all these
  1671. * restrictions (same for remap_pfn_range). However we would like
  1672. * consistency in testing and feature parity among all, so we should
  1673. * try to keep these invariants in place for everybody.
  1674. */
  1675. BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
  1676. BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
  1677. (VM_PFNMAP|VM_MIXEDMAP));
  1678. BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
  1679. BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
  1680. if (addr < vma->vm_start || addr >= vma->vm_end)
  1681. return -EFAULT;
  1682. track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
  1683. ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
  1684. false);
  1685. return ret;
  1686. }
  1687. EXPORT_SYMBOL(vm_insert_pfn_prot);
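/*
 * Illustrative sketch (not part of the original memory.c): vm_insert_pfn_prot()
 * covers the rare case where individual pages of one vma need a different
 * protection, e.g. a single uncached control page inside an otherwise normally
 * mapped region. The helper name and the pfn are hypothetical, and
 * pgprot_noncached() is only one possible choice of override.
 */
static int demo_insert_uncached_pfn(struct vm_area_struct *vma,
				    unsigned long addr, unsigned long pfn)
{
	return vm_insert_pfn_prot(vma, addr, pfn,
				  pgprot_noncached(vma->vm_page_prot));
}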
  1688. static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
  1689. {
  1690. /* these checks mirror the abort conditions in vm_normal_page */
  1691. if (vma->vm_flags & VM_MIXEDMAP)
  1692. return true;
  1693. if (pfn_t_devmap(pfn))
  1694. return true;
  1695. if (pfn_t_special(pfn))
  1696. return true;
  1697. if (is_zero_pfn(pfn_t_to_pfn(pfn)))
  1698. return true;
  1699. return false;
  1700. }
  1701. static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
  1702. pfn_t pfn, bool mkwrite)
  1703. {
  1704. pgprot_t pgprot = vma->vm_page_prot;
  1705. BUG_ON(!vm_mixed_ok(vma, pfn));
  1706. if (addr < vma->vm_start || addr >= vma->vm_end)
  1707. return -EFAULT;
  1708. track_pfn_insert(vma, &pgprot, pfn);
  1709. /*
  1710. * If we don't have pte special, then we have to use the pfn_valid()
  1711. * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
  1712. * refcount the page if pfn_valid is true (hence insert_page rather
  1713. * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
1714. * without pte special, it would then be refcounted as a normal page.
  1715. */
  1716. if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
  1717. !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
  1718. struct page *page;
  1719. /*
  1720. * At this point we are committed to insert_page()
  1721. * regardless of whether the caller specified flags that
  1722. * result in pfn_t_has_page() == false.
  1723. */
  1724. page = pfn_to_page(pfn_t_to_pfn(pfn));
  1725. return insert_page(vma, addr, page, pgprot);
  1726. }
  1727. return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
  1728. }
  1729. int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
  1730. pfn_t pfn)
  1731. {
  1732. return __vm_insert_mixed(vma, addr, pfn, false);
  1733. }
  1734. EXPORT_SYMBOL(vm_insert_mixed);
  1735. /*
  1736. * If the insertion of PTE failed because someone else already added a
  1737. * different entry in the mean time, we treat that as success as we assume
  1738. * the same entry was actually inserted.
  1739. */
  1740. vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
  1741. unsigned long addr, pfn_t pfn)
  1742. {
  1743. int err;
  1744. err = __vm_insert_mixed(vma, addr, pfn, true);
  1745. if (err == -ENOMEM)
  1746. return VM_FAULT_OOM;
  1747. if (err < 0 && err != -EBUSY)
  1748. return VM_FAULT_SIGBUS;
  1749. return VM_FAULT_NOPAGE;
  1750. }
  1751. EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
  1752. /*
1753. * Maps a range of physical memory into the requested pages. The old
1754. * mappings are removed. Any references to nonexistent pages result
1755. * in null mappings (currently treated as "copy-on-access").
  1756. */
  1757. static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
  1758. unsigned long addr, unsigned long end,
  1759. unsigned long pfn, pgprot_t prot)
  1760. {
  1761. pte_t *pte;
  1762. spinlock_t *ptl;
  1763. pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
  1764. if (!pte)
  1765. return -ENOMEM;
  1766. arch_enter_lazy_mmu_mode();
  1767. do {
  1768. BUG_ON(!pte_none(*pte));
  1769. set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
  1770. pfn++;
  1771. } while (pte++, addr += PAGE_SIZE, addr != end);
  1772. arch_leave_lazy_mmu_mode();
  1773. pte_unmap_unlock(pte - 1, ptl);
  1774. return 0;
  1775. }
  1776. static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
  1777. unsigned long addr, unsigned long end,
  1778. unsigned long pfn, pgprot_t prot)
  1779. {
  1780. pmd_t *pmd;
  1781. unsigned long next;
  1782. pfn -= addr >> PAGE_SHIFT;
  1783. pmd = pmd_alloc(mm, pud, addr);
  1784. if (!pmd)
  1785. return -ENOMEM;
  1786. VM_BUG_ON(pmd_trans_huge(*pmd));
  1787. do {
  1788. next = pmd_addr_end(addr, end);
  1789. if (remap_pte_range(mm, pmd, addr, next,
  1790. pfn + (addr >> PAGE_SHIFT), prot))
  1791. return -ENOMEM;
  1792. } while (pmd++, addr = next, addr != end);
  1793. return 0;
  1794. }
  1795. static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
  1796. unsigned long addr, unsigned long end,
  1797. unsigned long pfn, pgprot_t prot)
  1798. {
  1799. pud_t *pud;
  1800. unsigned long next;
  1801. pfn -= addr >> PAGE_SHIFT;
  1802. pud = pud_alloc(mm, p4d, addr);
  1803. if (!pud)
  1804. return -ENOMEM;
  1805. do {
  1806. next = pud_addr_end(addr, end);
  1807. if (remap_pmd_range(mm, pud, addr, next,
  1808. pfn + (addr >> PAGE_SHIFT), prot))
  1809. return -ENOMEM;
  1810. } while (pud++, addr = next, addr != end);
  1811. return 0;
  1812. }
  1813. static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
  1814. unsigned long addr, unsigned long end,
  1815. unsigned long pfn, pgprot_t prot)
  1816. {
  1817. p4d_t *p4d;
  1818. unsigned long next;
  1819. pfn -= addr >> PAGE_SHIFT;
  1820. p4d = p4d_alloc(mm, pgd, addr);
  1821. if (!p4d)
  1822. return -ENOMEM;
  1823. do {
  1824. next = p4d_addr_end(addr, end);
  1825. if (remap_pud_range(mm, p4d, addr, next,
  1826. pfn + (addr >> PAGE_SHIFT), prot))
  1827. return -ENOMEM;
  1828. } while (p4d++, addr = next, addr != end);
  1829. return 0;
  1830. }
  1831. /**
  1832. * remap_pfn_range - remap kernel memory to userspace
  1833. * @vma: user vma to map to
  1834. * @addr: target user address to start at
  1835. * @pfn: physical address of kernel memory
  1836. * @size: size of map area
  1837. * @prot: page protection flags for this mapping
  1838. *
  1839. * Note: this is only safe if the mm semaphore is held when called.
  1840. */
  1841. int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
  1842. unsigned long pfn, unsigned long size, pgprot_t prot)
  1843. {
  1844. pgd_t *pgd;
  1845. unsigned long next;
  1846. unsigned long end = addr + PAGE_ALIGN(size);
  1847. struct mm_struct *mm = vma->vm_mm;
  1848. unsigned long remap_pfn = pfn;
  1849. int err;
  1850. /*
  1851. * Physically remapped pages are special. Tell the
  1852. * rest of the world about it:
  1853. * VM_IO tells people not to look at these pages
  1854. * (accesses can have side effects).
  1855. * VM_PFNMAP tells the core MM that the base pages are just
  1856. * raw PFN mappings, and do not have a "struct page" associated
  1857. * with them.
  1858. * VM_DONTEXPAND
  1859. * Disable vma merging and expanding with mremap().
  1860. * VM_DONTDUMP
  1861. * Omit vma from core dump, even when VM_IO turned off.
  1862. *
  1863. * There's a horrible special case to handle copy-on-write
  1864. * behaviour that some programs depend on. We mark the "original"
  1865. * un-COW'ed pages by matching them up with "vma->vm_pgoff".
  1866. * See vm_normal_page() for details.
  1867. */
  1868. if (is_cow_mapping(vma->vm_flags)) {
  1869. if (addr != vma->vm_start || end != vma->vm_end)
  1870. return -EINVAL;
  1871. vma->vm_pgoff = pfn;
  1872. }
  1873. err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
  1874. if (err)
  1875. return -EINVAL;
  1876. vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
  1877. BUG_ON(addr >= end);
  1878. pfn -= addr >> PAGE_SHIFT;
  1879. pgd = pgd_offset(mm, addr);
  1880. flush_cache_range(vma, addr, end);
  1881. do {
  1882. next = pgd_addr_end(addr, end);
  1883. err = remap_p4d_range(mm, pgd, addr, next,
  1884. pfn + (addr >> PAGE_SHIFT), prot);
  1885. if (err)
  1886. break;
  1887. } while (pgd++, addr = next, addr != end);
  1888. if (err)
  1889. untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
  1890. return err;
  1891. }
  1892. EXPORT_SYMBOL(remap_pfn_range);
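/*
 * Illustrative sketch (not part of the original memory.c): the classic user of
 * remap_pfn_range() is a driver ->mmap handler mapping a physical region of
 * its device across the whole vma. The helper name and the phys/size
 * parameters are hypothetical; honouring vma->vm_pgoff and choosing a suitable
 * vm_page_prot are left to a real driver.
 */
static int demo_remap_device_region(struct vm_area_struct *vma,
				    phys_addr_t phys, unsigned long size)
{
	if (vma->vm_end - vma->vm_start > size)
		return -EINVAL;
	return remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}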
  1893. /**
  1894. * vm_iomap_memory - remap memory to userspace
  1895. * @vma: user vma to map to
  1896. * @start: start of area
  1897. * @len: size of area
  1898. *
  1899. * This is a simplified io_remap_pfn_range() for common driver use. The
1900. * driver just needs to give us the physical memory range to be mapped;
1901. * we'll figure out the rest from the vma information.
1902. *
1903. * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set up
1904. * write-combining or similar.
  1905. */
  1906. int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
  1907. {
  1908. unsigned long vm_len, pfn, pages;
  1909. /* Check that the physical memory area passed in looks valid */
  1910. if (start + len < start)
  1911. return -EINVAL;
  1912. /*
  1913. * You *really* shouldn't map things that aren't page-aligned,
  1914. * but we've historically allowed it because IO memory might
  1915. * just have smaller alignment.
  1916. */
  1917. len += start & ~PAGE_MASK;
  1918. pfn = start >> PAGE_SHIFT;
  1919. pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
  1920. if (pfn + pages < pfn)
  1921. return -EINVAL;
  1922. /* We start the mapping 'vm_pgoff' pages into the area */
  1923. if (vma->vm_pgoff > pages)
  1924. return -EINVAL;
  1925. pfn += vma->vm_pgoff;
  1926. pages -= vma->vm_pgoff;
  1927. /* Can we fit all of the mapping? */
  1928. vm_len = vma->vm_end - vma->vm_start;
  1929. if (vm_len >> PAGE_SHIFT > pages)
  1930. return -EINVAL;
  1931. /* Ok, let it rip */
  1932. return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
  1933. }
  1934. EXPORT_SYMBOL(vm_iomap_memory);
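/*
 * Illustrative sketch (not part of the original memory.c): with
 * vm_iomap_memory() the same kind of ->mmap handler shrinks to handing over
 * the raw physical range; the vm_pgoff and length checks above are done for
 * us. The helper name and fb_phys/fb_len are hypothetical, and the
 * write-combining tweak is just one example of the NOTE above.
 */
static int demo_iomap_framebuffer(struct vm_area_struct *vma,
				  phys_addr_t fb_phys, unsigned long fb_len)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return vm_iomap_memory(vma, fb_phys, fb_len);
}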
  1935. static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
  1936. unsigned long addr, unsigned long end,
  1937. pte_fn_t fn, void *data)
  1938. {
  1939. pte_t *pte;
  1940. int err;
  1941. pgtable_t token;
  1942. spinlock_t *uninitialized_var(ptl);
  1943. pte = (mm == &init_mm) ?
  1944. pte_alloc_kernel(pmd, addr) :
  1945. pte_alloc_map_lock(mm, pmd, addr, &ptl);
  1946. if (!pte)
  1947. return -ENOMEM;
  1948. BUG_ON(pmd_huge(*pmd));
  1949. arch_enter_lazy_mmu_mode();
  1950. token = pmd_pgtable(*pmd);
  1951. do {
  1952. err = fn(pte++, token, addr, data);
  1953. if (err)
  1954. break;
  1955. } while (addr += PAGE_SIZE, addr != end);
  1956. arch_leave_lazy_mmu_mode();
  1957. if (mm != &init_mm)
  1958. pte_unmap_unlock(pte-1, ptl);
  1959. return err;
  1960. }
  1961. static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
  1962. unsigned long addr, unsigned long end,
  1963. pte_fn_t fn, void *data)
  1964. {
  1965. pmd_t *pmd;
  1966. unsigned long next;
  1967. int err;
  1968. BUG_ON(pud_huge(*pud));
  1969. pmd = pmd_alloc(mm, pud, addr);
  1970. if (!pmd)
  1971. return -ENOMEM;
  1972. do {
  1973. next = pmd_addr_end(addr, end);
  1974. err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
  1975. if (err)
  1976. break;
  1977. } while (pmd++, addr = next, addr != end);
  1978. return err;
  1979. }
  1980. static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
  1981. unsigned long addr, unsigned long end,
  1982. pte_fn_t fn, void *data)
  1983. {
  1984. pud_t *pud;
  1985. unsigned long next;
  1986. int err;
  1987. pud = pud_alloc(mm, p4d, addr);
  1988. if (!pud)
  1989. return -ENOMEM;
  1990. do {
  1991. next = pud_addr_end(addr, end);
  1992. err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
  1993. if (err)
  1994. break;
  1995. } while (pud++, addr = next, addr != end);
  1996. return err;
  1997. }
  1998. static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
  1999. unsigned long addr, unsigned long end,
  2000. pte_fn_t fn, void *data)
  2001. {
  2002. p4d_t *p4d;
  2003. unsigned long next;
  2004. int err;
  2005. p4d = p4d_alloc(mm, pgd, addr);
  2006. if (!p4d)
  2007. return -ENOMEM;
  2008. do {
  2009. next = p4d_addr_end(addr, end);
  2010. err = apply_to_pud_range(mm, p4d, addr, next, fn, data);
  2011. if (err)
  2012. break;
  2013. } while (p4d++, addr = next, addr != end);
  2014. return err;
  2015. }
  2016. /*
  2017. * Scan a region of virtual memory, filling in page tables as necessary
  2018. * and calling a provided function on each leaf page table.
  2019. */
  2020. int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
  2021. unsigned long size, pte_fn_t fn, void *data)
  2022. {
  2023. pgd_t *pgd;
  2024. unsigned long next;
  2025. unsigned long end = addr + size;
  2026. int err;
  2027. if (WARN_ON(addr >= end))
  2028. return -EINVAL;
  2029. pgd = pgd_offset(mm, addr);
  2030. do {
  2031. next = pgd_addr_end(addr, end);
  2032. err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
  2033. if (err)
  2034. break;
  2035. } while (pgd++, addr = next, addr != end);
  2036. return err;
  2037. }
  2038. EXPORT_SYMBOL_GPL(apply_to_page_range);
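/*
 * Illustrative sketch (not part of the original memory.c): a minimal pte_fn_t
 * callback for apply_to_page_range(). It only counts the leaf ptes it is
 * handed; "data" points at a counter owned by the caller. The callback
 * arguments (pte, token, addr, data) mirror the call made from
 * apply_to_pte_range() above. Both helper names are hypothetical.
 */
static int demo_count_one_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			      void *data)
{
	(*(unsigned long *)data)++;
	return 0;
}

static int demo_count_ptes(struct mm_struct *mm, unsigned long addr,
			   unsigned long size, unsigned long *count)
{
	*count = 0;
	return apply_to_page_range(mm, addr, size, demo_count_one_pte, count);
}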
  2039. /*
  2040. * handle_pte_fault chooses page fault handler according to an entry which was
  2041. * read non-atomically. Before making any commitment, on those architectures
  2042. * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
  2043. * parts, do_swap_page must check under lock before unmapping the pte and
  2044. * proceeding (but do_wp_page is only called after already making such a check;
  2045. * and do_anonymous_page can safely check later on).
  2046. */
  2047. static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
  2048. pte_t *page_table, pte_t orig_pte)
  2049. {
  2050. int same = 1;
  2051. #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
  2052. if (sizeof(pte_t) > sizeof(unsigned long)) {
  2053. spinlock_t *ptl = pte_lockptr(mm, pmd);
  2054. spin_lock(ptl);
  2055. same = pte_same(*page_table, orig_pte);
  2056. spin_unlock(ptl);
  2057. }
  2058. #endif
  2059. pte_unmap(page_table);
  2060. return same;
  2061. }
  2062. static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
  2063. {
  2064. debug_dma_assert_idle(src);
  2065. /*
  2066. * If the source page was a PFN mapping, we don't have
  2067. * a "struct page" for it. We do a best-effort copy by
  2068. * just copying from the original user address. If that
  2069. * fails, we just zero-fill it. Live with it.
  2070. */
  2071. if (unlikely(!src)) {
  2072. void *kaddr = kmap_atomic(dst);
  2073. void __user *uaddr = (void __user *)(va & PAGE_MASK);
  2074. /*
  2075. * This really shouldn't fail, because the page is there
  2076. * in the page tables. But it might just be unreadable,
  2077. * in which case we just give up and fill the result with
  2078. * zeroes.
  2079. */
  2080. if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
  2081. clear_page(kaddr);
  2082. kunmap_atomic(kaddr);
  2083. flush_dcache_page(dst);
  2084. } else
  2085. copy_user_highpage(dst, src, va, vma);
  2086. }
  2087. static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
  2088. {
  2089. struct file *vm_file = vma->vm_file;
  2090. if (vm_file)
  2091. return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
  2092. /*
  2093. * Special mappings (e.g. VDSO) do not have any file so fake
  2094. * a default GFP_KERNEL for them.
  2095. */
  2096. return GFP_KERNEL;
  2097. }
  2098. /*
  2099. * Notify the address space that the page is about to become writable so that
  2100. * it can prohibit this or wait for the page to get into an appropriate state.
  2101. *
  2102. * We do this without the lock held, so that it can sleep if it needs to.
  2103. */
  2104. static int do_page_mkwrite(struct vm_fault *vmf)
  2105. {
  2106. int ret;
  2107. struct page *page = vmf->page;
  2108. unsigned int old_flags = vmf->flags;
  2109. vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
  2110. ret = vmf->vma->vm_ops->page_mkwrite(vmf);
  2111. /* Restore original flags so that caller is not surprised */
  2112. vmf->flags = old_flags;
  2113. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
  2114. return ret;
  2115. if (unlikely(!(ret & VM_FAULT_LOCKED))) {
  2116. lock_page(page);
  2117. if (!page->mapping) {
  2118. unlock_page(page);
  2119. return 0; /* retry */
  2120. }
  2121. ret |= VM_FAULT_LOCKED;
  2122. } else
  2123. VM_BUG_ON_PAGE(!PageLocked(page), page);
  2124. return ret;
  2125. }
  2126. /*
  2127. * Handle dirtying of a page in shared file mapping on a write fault.
  2128. *
  2129. * The function expects the page to be locked and unlocks it.
  2130. */
  2131. static void fault_dirty_shared_page(struct vm_area_struct *vma,
  2132. struct page *page)
  2133. {
  2134. struct address_space *mapping;
  2135. bool dirtied;
  2136. bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
  2137. dirtied = set_page_dirty(page);
  2138. VM_BUG_ON_PAGE(PageAnon(page), page);
  2139. /*
  2140. * Take a local copy of the address_space - page.mapping may be zeroed
  2141. * by truncate after unlock_page(). The address_space itself remains
  2142. * pinned by vma->vm_file's reference. We rely on unlock_page()'s
  2143. * release semantics to prevent the compiler from undoing this copying.
  2144. */
  2145. mapping = page_rmapping(page);
  2146. unlock_page(page);
  2147. if ((dirtied || page_mkwrite) && mapping) {
  2148. /*
  2149. * Some device drivers do not set page.mapping
  2150. * but still dirty their pages
  2151. */
  2152. balance_dirty_pages_ratelimited(mapping);
  2153. }
  2154. if (!page_mkwrite)
  2155. file_update_time(vma->vm_file);
  2156. }
  2157. /*
  2158. * Handle write page faults for pages that can be reused in the current vma
  2159. *
  2160. * This can happen either due to the mapping being with the VM_SHARED flag,
  2161. * or due to us being the last reference standing to the page. In either
  2162. * case, all we need to do here is to mark the page as writable and update
  2163. * any related book-keeping.
  2164. */
  2165. static inline void wp_page_reuse(struct vm_fault *vmf)
  2166. __releases(vmf->ptl)
  2167. {
  2168. struct vm_area_struct *vma = vmf->vma;
  2169. struct page *page = vmf->page;
  2170. pte_t entry;
  2171. /*
  2172. * Clear the pages cpupid information as the existing
  2173. * information potentially belongs to a now completely
  2174. * unrelated process.
  2175. */
  2176. if (page)
  2177. page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
  2178. flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
  2179. entry = pte_mkyoung(vmf->orig_pte);
  2180. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  2181. if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
  2182. update_mmu_cache(vma, vmf->address, vmf->pte);
  2183. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2184. }
  2185. /*
  2186. * Handle the case of a page which we actually need to copy to a new page.
  2187. *
  2188. * Called with mmap_sem locked and the old page referenced, but
  2189. * without the ptl held.
  2190. *
  2191. * High level logic flow:
  2192. *
  2193. * - Allocate a page, copy the content of the old page to the new one.
  2194. * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
  2195. * - Take the PTL. If the pte changed, bail out and release the allocated page
  2196. * - If the pte is still the way we remember it, update the page table and all
  2197. * relevant references. This includes dropping the reference the page-table
  2198. * held to the old page, as well as updating the rmap.
  2199. * - In any case, unlock the PTL and drop the reference we took to the old page.
  2200. */
  2201. static int wp_page_copy(struct vm_fault *vmf)
  2202. {
  2203. struct vm_area_struct *vma = vmf->vma;
  2204. struct mm_struct *mm = vma->vm_mm;
  2205. struct page *old_page = vmf->page;
  2206. struct page *new_page = NULL;
  2207. pte_t entry;
  2208. int page_copied = 0;
  2209. const unsigned long mmun_start = vmf->address & PAGE_MASK;
  2210. const unsigned long mmun_end = mmun_start + PAGE_SIZE;
  2211. struct mem_cgroup *memcg;
  2212. if (unlikely(anon_vma_prepare(vma)))
  2213. goto oom;
  2214. if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
  2215. new_page = alloc_zeroed_user_highpage_movable(vma,
  2216. vmf->address);
  2217. if (!new_page)
  2218. goto oom;
  2219. } else {
  2220. new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
  2221. vmf->address);
  2222. if (!new_page)
  2223. goto oom;
  2224. cow_user_page(new_page, old_page, vmf->address, vma);
  2225. }
  2226. if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
  2227. goto oom_free_new;
  2228. __SetPageUptodate(new_page);
  2229. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  2230. /*
  2231. * Re-check the pte - we dropped the lock
  2232. */
  2233. vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
  2234. if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
  2235. if (old_page) {
  2236. if (!PageAnon(old_page)) {
  2237. dec_mm_counter_fast(mm,
  2238. mm_counter_file(old_page));
  2239. inc_mm_counter_fast(mm, MM_ANONPAGES);
  2240. }
  2241. } else {
  2242. inc_mm_counter_fast(mm, MM_ANONPAGES);
  2243. }
  2244. flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
  2245. entry = mk_pte(new_page, vma->vm_page_prot);
  2246. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  2247. /*
  2248. * Clear the pte entry and flush it first, before updating the
  2249. * pte with the new entry. This will avoid a race condition
  2250. * seen in the presence of one thread doing SMC and another
  2251. * thread doing COW.
  2252. */
  2253. ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
  2254. page_add_new_anon_rmap(new_page, vma, vmf->address, false);
  2255. mem_cgroup_commit_charge(new_page, memcg, false, false);
  2256. lru_cache_add_active_or_unevictable(new_page, vma);
  2257. /*
  2258. * We call the notify macro here because, when using secondary
  2259. * mmu page tables (such as kvm shadow page tables), we want the
  2260. * new page to be mapped directly into the secondary page table.
  2261. */
  2262. set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
  2263. update_mmu_cache(vma, vmf->address, vmf->pte);
  2264. if (old_page) {
  2265. /*
  2266. * Only after switching the pte to the new page may
  2267. * we remove the mapcount here. Otherwise another
  2268. * process may come and find the rmap count decremented
  2269. * before the pte is switched to the new page, and
  2270. * "reuse" the old page writing into it while our pte
  2271. * here still points into it and can be read by other
  2272. * threads.
  2273. *
  2274. * The critical issue is to order this
2275. * page_remove_rmap with the ptep_clear_flush above.
  2276. * Those stores are ordered by (if nothing else,)
  2277. * the barrier present in the atomic_add_negative
  2278. * in page_remove_rmap.
  2279. *
  2280. * Then the TLB flush in ptep_clear_flush ensures that
  2281. * no process can access the old page before the
  2282. * decremented mapcount is visible. And the old page
  2283. * cannot be reused until after the decremented
  2284. * mapcount is visible. So transitively, TLBs to
  2285. * old page will be flushed before it can be reused.
  2286. */
  2287. page_remove_rmap(old_page, false);
  2288. }
  2289. /* Free the old page.. */
  2290. new_page = old_page;
  2291. page_copied = 1;
  2292. } else {
  2293. mem_cgroup_cancel_charge(new_page, memcg, false);
  2294. }
  2295. if (new_page)
  2296. put_page(new_page);
  2297. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2298. /*
  2299. * No need to double call mmu_notifier->invalidate_range() callback as
  2300. * the above ptep_clear_flush_notify() did already call it.
  2301. */
  2302. mmu_notifier_invalidate_range_only_end(mm, mmun_start, mmun_end);
  2303. if (old_page) {
  2304. /*
  2305. * Don't let another task, with possibly unlocked vma,
  2306. * keep the mlocked page.
  2307. */
  2308. if (page_copied && (vma->vm_flags & VM_LOCKED)) {
  2309. lock_page(old_page); /* LRU manipulation */
  2310. if (PageMlocked(old_page))
  2311. munlock_vma_page(old_page);
  2312. unlock_page(old_page);
  2313. }
  2314. put_page(old_page);
  2315. }
  2316. return page_copied ? VM_FAULT_WRITE : 0;
  2317. oom_free_new:
  2318. put_page(new_page);
  2319. oom:
  2320. if (old_page)
  2321. put_page(old_page);
  2322. return VM_FAULT_OOM;
  2323. }
  2324. /**
  2325. * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
  2326. * writeable once the page is prepared
  2327. *
  2328. * @vmf: structure describing the fault
  2329. *
  2330. * This function handles all that is needed to finish a write page fault in a
  2331. * shared mapping due to PTE being read-only once the mapped page is prepared.
2332. * It handles locking of PTE and modifying it. The function returns 0 on
2333. * success, VM_FAULT_NOPAGE when the PTE got changed before we acquired the
2334. * PTE lock.
  2335. *
  2336. * The function expects the page to be locked or other protection against
  2337. * concurrent faults / writeback (such as DAX radix tree locks).
  2338. */
  2339. int finish_mkwrite_fault(struct vm_fault *vmf)
  2340. {
  2341. WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
  2342. vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
  2343. &vmf->ptl);
  2344. /*
  2345. * We might have raced with another page fault while we released the
  2346. * pte_offset_map_lock.
  2347. */
  2348. if (!pte_same(*vmf->pte, vmf->orig_pte)) {
  2349. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2350. return VM_FAULT_NOPAGE;
  2351. }
  2352. wp_page_reuse(vmf);
  2353. return 0;
  2354. }
  2355. /*
  2356. * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
  2357. * mapping
  2358. */
  2359. static int wp_pfn_shared(struct vm_fault *vmf)
  2360. {
  2361. struct vm_area_struct *vma = vmf->vma;
  2362. if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
  2363. int ret;
  2364. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2365. vmf->flags |= FAULT_FLAG_MKWRITE;
  2366. ret = vma->vm_ops->pfn_mkwrite(vmf);
  2367. if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
  2368. return ret;
  2369. return finish_mkwrite_fault(vmf);
  2370. }
  2371. wp_page_reuse(vmf);
  2372. return VM_FAULT_WRITE;
  2373. }
  2374. static int wp_page_shared(struct vm_fault *vmf)
  2375. __releases(vmf->ptl)
  2376. {
  2377. struct vm_area_struct *vma = vmf->vma;
  2378. get_page(vmf->page);
  2379. if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
  2380. int tmp;
  2381. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2382. tmp = do_page_mkwrite(vmf);
  2383. if (unlikely(!tmp || (tmp &
  2384. (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
  2385. put_page(vmf->page);
  2386. return tmp;
  2387. }
  2388. tmp = finish_mkwrite_fault(vmf);
  2389. if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
  2390. unlock_page(vmf->page);
  2391. put_page(vmf->page);
  2392. return tmp;
  2393. }
  2394. } else {
  2395. wp_page_reuse(vmf);
  2396. lock_page(vmf->page);
  2397. }
  2398. fault_dirty_shared_page(vma, vmf->page);
  2399. put_page(vmf->page);
  2400. return VM_FAULT_WRITE;
  2401. }
  2402. /*
  2403. * This routine handles present pages, when users try to write
  2404. * to a shared page. It is done by copying the page to a new address
  2405. * and decrementing the shared-page counter for the old page.
  2406. *
  2407. * Note that this routine assumes that the protection checks have been
  2408. * done by the caller (the low-level page fault routine in most cases).
  2409. * Thus we can safely just mark it writable once we've done any necessary
  2410. * COW.
  2411. *
  2412. * We also mark the page dirty at this point even though the page will
  2413. * change only once the write actually happens. This avoids a few races,
  2414. * and potentially makes it more efficient.
  2415. *
  2416. * We enter with non-exclusive mmap_sem (to exclude vma changes,
  2417. * but allow concurrent faults), with pte both mapped and locked.
  2418. * We return with mmap_sem still held, but pte unmapped and unlocked.
  2419. */
  2420. static int do_wp_page(struct vm_fault *vmf)
  2421. __releases(vmf->ptl)
  2422. {
  2423. struct vm_area_struct *vma = vmf->vma;
  2424. vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
  2425. if (!vmf->page) {
  2426. /*
  2427. * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
  2428. * VM_PFNMAP VMA.
  2429. *
  2430. * We should not cow pages in a shared writeable mapping.
  2431. * Just mark the pages writable and/or call ops->pfn_mkwrite.
  2432. */
  2433. if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
  2434. (VM_WRITE|VM_SHARED))
  2435. return wp_pfn_shared(vmf);
  2436. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2437. return wp_page_copy(vmf);
  2438. }
  2439. /*
  2440. * Take out anonymous pages first, anonymous shared vmas are
  2441. * not dirty accountable.
  2442. */
  2443. if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
  2444. int total_map_swapcount;
  2445. if (!trylock_page(vmf->page)) {
  2446. get_page(vmf->page);
  2447. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2448. lock_page(vmf->page);
  2449. vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
  2450. vmf->address, &vmf->ptl);
  2451. if (!pte_same(*vmf->pte, vmf->orig_pte)) {
  2452. unlock_page(vmf->page);
  2453. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2454. put_page(vmf->page);
  2455. return 0;
  2456. }
  2457. put_page(vmf->page);
  2458. }
  2459. if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
  2460. if (total_map_swapcount == 1) {
  2461. /*
  2462. * The page is all ours. Move it to
  2463. * our anon_vma so the rmap code will
  2464. * not search our parent or siblings.
  2465. * Protected against the rmap code by
  2466. * the page lock.
  2467. */
  2468. page_move_anon_rmap(vmf->page, vma);
  2469. }
  2470. unlock_page(vmf->page);
  2471. wp_page_reuse(vmf);
  2472. return VM_FAULT_WRITE;
  2473. }
  2474. unlock_page(vmf->page);
  2475. } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
  2476. (VM_WRITE|VM_SHARED))) {
  2477. return wp_page_shared(vmf);
  2478. }
  2479. /*
  2480. * Ok, we need to copy. Oh, well..
  2481. */
  2482. get_page(vmf->page);
  2483. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2484. return wp_page_copy(vmf);
  2485. }
  2486. static void unmap_mapping_range_vma(struct vm_area_struct *vma,
  2487. unsigned long start_addr, unsigned long end_addr,
  2488. struct zap_details *details)
  2489. {
  2490. zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
  2491. }
  2492. static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
  2493. struct zap_details *details)
  2494. {
  2495. struct vm_area_struct *vma;
  2496. pgoff_t vba, vea, zba, zea;
  2497. vma_interval_tree_foreach(vma, root,
  2498. details->first_index, details->last_index) {
  2499. vba = vma->vm_pgoff;
  2500. vea = vba + vma_pages(vma) - 1;
  2501. zba = details->first_index;
  2502. if (zba < vba)
  2503. zba = vba;
  2504. zea = details->last_index;
  2505. if (zea > vea)
  2506. zea = vea;
  2507. unmap_mapping_range_vma(vma,
  2508. ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
  2509. ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
  2510. details);
  2511. }
  2512. }
  2513. /**
  2514. * unmap_mapping_pages() - Unmap pages from processes.
  2515. * @mapping: The address space containing pages to be unmapped.
  2516. * @start: Index of first page to be unmapped.
  2517. * @nr: Number of pages to be unmapped. 0 to unmap to end of file.
  2518. * @even_cows: Whether to unmap even private COWed pages.
  2519. *
  2520. * Unmap the pages in this address space from any userspace process which
  2521. * has them mmaped. Generally, you want to remove COWed pages as well when
  2522. * a file is being truncated, but not when invalidating pages from the page
  2523. * cache.
  2524. */
  2525. void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
  2526. pgoff_t nr, bool even_cows)
  2527. {
  2528. struct zap_details details = { };
  2529. details.check_mapping = even_cows ? NULL : mapping;
  2530. details.first_index = start;
  2531. details.last_index = start + nr - 1;
  2532. if (details.last_index < details.first_index)
  2533. details.last_index = ULONG_MAX;
  2534. i_mmap_lock_write(mapping);
  2535. if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
  2536. unmap_mapping_range_tree(&mapping->i_mmap, &details);
  2537. i_mmap_unlock_write(mapping);
  2538. }
  2539. /**
  2540. * unmap_mapping_range - unmap the portion of all mmaps in the specified
  2541. * address_space corresponding to the specified byte range in the underlying
  2542. * file.
  2543. *
  2544. * @mapping: the address space containing mmaps to be unmapped.
  2545. * @holebegin: byte in first page to unmap, relative to the start of
  2546. * the underlying file. This will be rounded down to a PAGE_SIZE
  2547. * boundary. Note that this is different from truncate_pagecache(), which
  2548. * must keep the partial page. In contrast, we must get rid of
  2549. * partial pages.
  2550. * @holelen: size of prospective hole in bytes. This will be rounded
  2551. * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
  2552. * end of the file.
  2553. * @even_cows: 1 when truncating a file, unmap even private COWed pages;
  2554. * but 0 when invalidating pagecache, don't throw away private data.
  2555. */
  2556. void unmap_mapping_range(struct address_space *mapping,
  2557. loff_t const holebegin, loff_t const holelen, int even_cows)
  2558. {
  2559. pgoff_t hba = holebegin >> PAGE_SHIFT;
  2560. pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
  2561. /* Check for overflow. */
  2562. if (sizeof(holelen) > sizeof(hlen)) {
  2563. long long holeend =
  2564. (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
  2565. if (holeend & ~(long long)ULONG_MAX)
  2566. hlen = ULONG_MAX - hba + 1;
  2567. }
  2568. unmap_mapping_pages(mapping, hba, hlen, even_cows);
  2569. }
  2570. EXPORT_SYMBOL(unmap_mapping_range);
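/*
 * Illustrative sketch (not part of the original memory.c): when a filesystem
 * shrinks an inode it typically unmaps everything from the first page
 * boundary at or above the new size through end of file (holelen == 0) and,
 * because this is a truncate, also discards private COWed copies
 * (even_cows == 1). The helper name is hypothetical; real callers normally
 * go through truncate_pagecache(), which does the same rounding.
 */
static void demo_unmap_beyond_eof(struct inode *inode, loff_t newsize)
{
	/* keep the partial page mapped, as truncate_pagecache() does */
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
}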
  2571. /*
  2572. * We enter with non-exclusive mmap_sem (to exclude vma changes,
  2573. * but allow concurrent faults), and pte mapped but not yet locked.
  2574. * We return with pte unmapped and unlocked.
  2575. *
  2576. * We return with the mmap_sem locked or unlocked in the same cases
  2577. * as does filemap_fault().
  2578. */
  2579. int do_swap_page(struct vm_fault *vmf)
  2580. {
  2581. struct vm_area_struct *vma = vmf->vma;
  2582. struct page *page = NULL, *swapcache;
  2583. struct mem_cgroup *memcg;
  2584. swp_entry_t entry;
  2585. pte_t pte;
  2586. int locked;
  2587. int exclusive = 0;
  2588. int ret = 0;
  2589. if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
  2590. goto out;
  2591. entry = pte_to_swp_entry(vmf->orig_pte);
  2592. if (unlikely(non_swap_entry(entry))) {
  2593. if (is_migration_entry(entry)) {
  2594. migration_entry_wait(vma->vm_mm, vmf->pmd,
  2595. vmf->address);
  2596. } else if (is_device_private_entry(entry)) {
  2597. /*
  2598. * For un-addressable device memory we call the pgmap
  2599. * fault handler callback. The callback must migrate
  2600. * the page back to some CPU accessible page.
  2601. */
  2602. ret = device_private_entry_fault(vma, vmf->address, entry,
  2603. vmf->flags, vmf->pmd);
  2604. } else if (is_hwpoison_entry(entry)) {
  2605. ret = VM_FAULT_HWPOISON;
  2606. } else {
  2607. print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
  2608. ret = VM_FAULT_SIGBUS;
  2609. }
  2610. goto out;
  2611. }
  2612. delayacct_set_flag(DELAYACCT_PF_SWAPIN);
  2613. page = lookup_swap_cache(entry, vma, vmf->address);
  2614. swapcache = page;
  2615. if (!page) {
  2616. struct swap_info_struct *si = swp_swap_info(entry);
  2617. if (si->flags & SWP_SYNCHRONOUS_IO &&
  2618. __swap_count(si, entry) == 1) {
  2619. /* skip swapcache */
  2620. page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
  2621. vmf->address);
  2622. if (page) {
  2623. __SetPageLocked(page);
  2624. __SetPageSwapBacked(page);
  2625. set_page_private(page, entry.val);
  2626. lru_cache_add_anon(page);
  2627. swap_readpage(page, true);
  2628. }
  2629. } else {
  2630. page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
  2631. vmf);
  2632. swapcache = page;
  2633. }
  2634. if (!page) {
  2635. /*
  2636. * Back out if somebody else faulted in this pte
  2637. * while we released the pte lock.
  2638. */
  2639. vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
  2640. vmf->address, &vmf->ptl);
  2641. if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
  2642. ret = VM_FAULT_OOM;
  2643. delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
  2644. goto unlock;
  2645. }
  2646. /* Had to read the page from swap area: Major fault */
  2647. ret = VM_FAULT_MAJOR;
  2648. count_vm_event(PGMAJFAULT);
  2649. count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
  2650. } else if (PageHWPoison(page)) {
  2651. /*
  2652. * hwpoisoned dirty swapcache pages are kept for killing
  2653. * owner processes (which may be unknown at hwpoison time)
  2654. */
  2655. ret = VM_FAULT_HWPOISON;
  2656. delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
  2657. goto out_release;
  2658. }
  2659. locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
  2660. delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
  2661. if (!locked) {
  2662. ret |= VM_FAULT_RETRY;
  2663. goto out_release;
  2664. }
  2665. /*
  2666. * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
  2667. * release the swapcache from under us. The page pin, and pte_same
  2668. * test below, are not enough to exclude that. Even if it is still
  2669. * swapcache, we need to check that the page's swap has not changed.
  2670. */
  2671. if (unlikely((!PageSwapCache(page) ||
  2672. page_private(page) != entry.val)) && swapcache)
  2673. goto out_page;
  2674. page = ksm_might_need_to_copy(page, vma, vmf->address);
  2675. if (unlikely(!page)) {
  2676. ret = VM_FAULT_OOM;
  2677. page = swapcache;
  2678. goto out_page;
  2679. }
  2680. if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
  2681. &memcg, false)) {
  2682. ret = VM_FAULT_OOM;
  2683. goto out_page;
  2684. }
  2685. /*
  2686. * Back out if somebody else already faulted in this pte.
  2687. */
  2688. vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
  2689. &vmf->ptl);
  2690. if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
  2691. goto out_nomap;
  2692. if (unlikely(!PageUptodate(page))) {
  2693. ret = VM_FAULT_SIGBUS;
  2694. goto out_nomap;
  2695. }
  2696. /*
  2697. * The page isn't present yet, go ahead with the fault.
  2698. *
  2699. * Be careful about the sequence of operations here.
  2700. * To get its accounting right, reuse_swap_page() must be called
  2701. * while the page is counted on swap but not yet in mapcount i.e.
  2702. * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
  2703. * must be called after the swap_free(), or it will never succeed.
  2704. */
  2705. inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
  2706. dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
  2707. pte = mk_pte(page, vma->vm_page_prot);
  2708. if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
  2709. pte = maybe_mkwrite(pte_mkdirty(pte), vma);
  2710. vmf->flags &= ~FAULT_FLAG_WRITE;
  2711. ret |= VM_FAULT_WRITE;
  2712. exclusive = RMAP_EXCLUSIVE;
  2713. }
  2714. flush_icache_page(vma, page);
  2715. if (pte_swp_soft_dirty(vmf->orig_pte))
  2716. pte = pte_mksoft_dirty(pte);
  2717. set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
  2718. arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
  2719. vmf->orig_pte = pte;
  2720. /* ksm created a completely new copy */
  2721. if (unlikely(page != swapcache && swapcache)) {
  2722. page_add_new_anon_rmap(page, vma, vmf->address, false);
  2723. mem_cgroup_commit_charge(page, memcg, false, false);
  2724. lru_cache_add_active_or_unevictable(page, vma);
  2725. } else {
  2726. do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
  2727. mem_cgroup_commit_charge(page, memcg, true, false);
  2728. activate_page(page);
  2729. }
  2730. swap_free(entry);
  2731. if (mem_cgroup_swap_full(page) ||
  2732. (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
  2733. try_to_free_swap(page);
  2734. unlock_page(page);
  2735. if (page != swapcache && swapcache) {
  2736. /*
2737. * Hold the lock to avoid the swap entry being reused
  2738. * until we take the PT lock for the pte_same() check
  2739. * (to avoid false positives from pte_same). For
  2740. * further safety release the lock after the swap_free
  2741. * so that the swap count won't change under a
  2742. * parallel locked swapcache.
  2743. */
  2744. unlock_page(swapcache);
  2745. put_page(swapcache);
  2746. }
  2747. if (vmf->flags & FAULT_FLAG_WRITE) {
  2748. ret |= do_wp_page(vmf);
  2749. if (ret & VM_FAULT_ERROR)
  2750. ret &= VM_FAULT_ERROR;
  2751. goto out;
  2752. }
  2753. /* No need to invalidate - it was non-present before */
  2754. update_mmu_cache(vma, vmf->address, vmf->pte);
  2755. unlock:
  2756. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2757. out:
  2758. return ret;
  2759. out_nomap:
  2760. mem_cgroup_cancel_charge(page, memcg, false);
  2761. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2762. out_page:
  2763. unlock_page(page);
  2764. out_release:
  2765. put_page(page);
  2766. if (page != swapcache && swapcache) {
  2767. unlock_page(swapcache);
  2768. put_page(swapcache);
  2769. }
  2770. return ret;
  2771. }
  2772. /*
  2773. * We enter with non-exclusive mmap_sem (to exclude vma changes,
  2774. * but allow concurrent faults), and pte mapped but not yet locked.
  2775. * We return with mmap_sem still held, but pte unmapped and unlocked.
  2776. */
  2777. static int do_anonymous_page(struct vm_fault *vmf)
  2778. {
  2779. struct vm_area_struct *vma = vmf->vma;
  2780. struct mem_cgroup *memcg;
  2781. struct page *page;
  2782. int ret = 0;
  2783. pte_t entry;
  2784. /* File mapping without ->vm_ops ? */
  2785. if (vma->vm_flags & VM_SHARED)
  2786. return VM_FAULT_SIGBUS;
  2787. /*
  2788. * Use pte_alloc() instead of pte_alloc_map(). We can't run
  2789. * pte_offset_map() on pmds where a huge pmd might be created
  2790. * from a different thread.
  2791. *
  2792. * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
  2793. * parallel threads are excluded by other means.
  2794. *
  2795. * Here we only have down_read(mmap_sem).
  2796. */
  2797. if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
  2798. return VM_FAULT_OOM;
  2799. /* See the comment in pte_alloc_one_map() */
  2800. if (unlikely(pmd_trans_unstable(vmf->pmd)))
  2801. return 0;
  2802. /* Use the zero-page for reads */
  2803. if (!(vmf->flags & FAULT_FLAG_WRITE) &&
  2804. !mm_forbids_zeropage(vma->vm_mm)) {
  2805. entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
  2806. vma->vm_page_prot));
  2807. vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
  2808. vmf->address, &vmf->ptl);
  2809. if (!pte_none(*vmf->pte))
  2810. goto unlock;
  2811. ret = check_stable_address_space(vma->vm_mm);
  2812. if (ret)
  2813. goto unlock;
  2814. /* Deliver the page fault to userland, check inside PT lock */
  2815. if (userfaultfd_missing(vma)) {
  2816. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2817. return handle_userfault(vmf, VM_UFFD_MISSING);
  2818. }
  2819. goto setpte;
  2820. }
  2821. /* Allocate our own private page. */
  2822. if (unlikely(anon_vma_prepare(vma)))
  2823. goto oom;
  2824. page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
  2825. if (!page)
  2826. goto oom;
  2827. if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
  2828. goto oom_free_page;
  2829. /*
  2830. * The memory barrier inside __SetPageUptodate makes sure that
2831. * preceding stores to the page contents become visible before
  2832. * the set_pte_at() write.
  2833. */
  2834. __SetPageUptodate(page);
  2835. entry = mk_pte(page, vma->vm_page_prot);
  2836. if (vma->vm_flags & VM_WRITE)
  2837. entry = pte_mkwrite(pte_mkdirty(entry));
  2838. vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
  2839. &vmf->ptl);
  2840. if (!pte_none(*vmf->pte))
  2841. goto release;
  2842. ret = check_stable_address_space(vma->vm_mm);
  2843. if (ret)
  2844. goto release;
  2845. /* Deliver the page fault to userland, check inside PT lock */
  2846. if (userfaultfd_missing(vma)) {
  2847. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2848. mem_cgroup_cancel_charge(page, memcg, false);
  2849. put_page(page);
  2850. return handle_userfault(vmf, VM_UFFD_MISSING);
  2851. }
  2852. inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
  2853. page_add_new_anon_rmap(page, vma, vmf->address, false);
  2854. mem_cgroup_commit_charge(page, memcg, false, false);
  2855. lru_cache_add_active_or_unevictable(page, vma);
  2856. setpte:
  2857. set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
  2858. /* No need to invalidate - it was non-present before */
  2859. update_mmu_cache(vma, vmf->address, vmf->pte);
  2860. unlock:
  2861. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2862. return ret;
  2863. release:
  2864. mem_cgroup_cancel_charge(page, memcg, false);
  2865. put_page(page);
  2866. goto unlock;
  2867. oom_free_page:
  2868. put_page(page);
  2869. oom:
  2870. return VM_FAULT_OOM;
  2871. }
  2872. /*
  2873. * The mmap_sem must have been held on entry, and may have been
  2874. * released depending on flags and vma->vm_ops->fault() return value.
2875. * See filemap_fault() and __lock_page_or_retry().
  2876. */
  2877. static int __do_fault(struct vm_fault *vmf)
  2878. {
  2879. struct vm_area_struct *vma = vmf->vma;
  2880. int ret;
  2881. ret = vma->vm_ops->fault(vmf);
  2882. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
  2883. VM_FAULT_DONE_COW)))
  2884. return ret;
  2885. if (unlikely(PageHWPoison(vmf->page))) {
  2886. if (ret & VM_FAULT_LOCKED)
  2887. unlock_page(vmf->page);
  2888. put_page(vmf->page);
  2889. vmf->page = NULL;
  2890. return VM_FAULT_HWPOISON;
  2891. }
  2892. if (unlikely(!(ret & VM_FAULT_LOCKED)))
  2893. lock_page(vmf->page);
  2894. else
  2895. VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
  2896. return ret;
  2897. }
  2898. /*
  2899. * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
  2900. * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
  2901. * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
  2902. * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
  2903. */
  2904. static int pmd_devmap_trans_unstable(pmd_t *pmd)
  2905. {
  2906. return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
  2907. }
  2908. static int pte_alloc_one_map(struct vm_fault *vmf)
  2909. {
  2910. struct vm_area_struct *vma = vmf->vma;
  2911. if (!pmd_none(*vmf->pmd))
  2912. goto map_pte;
  2913. if (vmf->prealloc_pte) {
  2914. vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
  2915. if (unlikely(!pmd_none(*vmf->pmd))) {
  2916. spin_unlock(vmf->ptl);
  2917. goto map_pte;
  2918. }
  2919. mm_inc_nr_ptes(vma->vm_mm);
  2920. pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
  2921. spin_unlock(vmf->ptl);
  2922. vmf->prealloc_pte = NULL;
  2923. } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) {
  2924. return VM_FAULT_OOM;
  2925. }
  2926. map_pte:
  2927. /*
  2928. * If a huge pmd materialized under us just retry later. Use
  2929. * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
  2930. * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
  2931. * under us and then back to pmd_none, as a result of MADV_DONTNEED
  2932. * running immediately after a huge pmd fault in a different thread of
  2933. * this mm, in turn leading to a misleading pmd_trans_huge() retval.
  2934. * All we have to ensure is that it is a regular pmd that we can walk
  2935. * with pte_offset_map() and we can do that through an atomic read in
  2936. * C, which is what pmd_trans_unstable() provides.
  2937. */
  2938. if (pmd_devmap_trans_unstable(vmf->pmd))
  2939. return VM_FAULT_NOPAGE;
  2940. /*
  2941. * At this point we know that our vmf->pmd points to a page of ptes
  2942. * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
  2943. * for the duration of the fault. If a racing MADV_DONTNEED runs and
  2944. * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
  2945. * be valid and we will re-check to make sure the vmf->pte isn't
  2946. * pte_none() under vmf->ptl protection when we return to
  2947. * alloc_set_pte().
  2948. */
  2949. vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
  2950. &vmf->ptl);
  2951. return 0;
  2952. }
  2953. #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
  2954. #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
  2955. static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
  2956. unsigned long haddr)
  2957. {
  2958. if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
  2959. (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
  2960. return false;
  2961. if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
  2962. return false;
  2963. return true;
  2964. }
  2965. static void deposit_prealloc_pte(struct vm_fault *vmf)
  2966. {
  2967. struct vm_area_struct *vma = vmf->vma;
  2968. pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
  2969. /*
  2970. * We are going to consume the prealloc table,
  2971. * count that as nr_ptes.
  2972. */
  2973. mm_inc_nr_ptes(vma->vm_mm);
  2974. vmf->prealloc_pte = NULL;
  2975. }
  2976. static int do_set_pmd(struct vm_fault *vmf, struct page *page)
  2977. {
  2978. struct vm_area_struct *vma = vmf->vma;
  2979. bool write = vmf->flags & FAULT_FLAG_WRITE;
  2980. unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
  2981. pmd_t entry;
  2982. int i, ret;
  2983. if (!transhuge_vma_suitable(vma, haddr))
  2984. return VM_FAULT_FALLBACK;
  2985. ret = VM_FAULT_FALLBACK;
  2986. page = compound_head(page);
  2987. /*
2988. * Archs like ppc64 need additional space to store information
  2989. * related to pte entry. Use the preallocated table for that.
  2990. */
  2991. if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
  2992. vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address);
  2993. if (!vmf->prealloc_pte)
  2994. return VM_FAULT_OOM;
  2995. smp_wmb(); /* See comment in __pte_alloc() */
  2996. }
  2997. vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
  2998. if (unlikely(!pmd_none(*vmf->pmd)))
  2999. goto out;
  3000. for (i = 0; i < HPAGE_PMD_NR; i++)
  3001. flush_icache_page(vma, page + i);
  3002. entry = mk_huge_pmd(page, vma->vm_page_prot);
  3003. if (write)
  3004. entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
  3005. add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR);
  3006. page_add_file_rmap(page, true);
  3007. /*
  3008. * deposit and withdraw with pmd lock held
  3009. */
  3010. if (arch_needs_pgtable_deposit())
  3011. deposit_prealloc_pte(vmf);
  3012. set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
  3013. update_mmu_cache_pmd(vma, haddr, vmf->pmd);
  3014. /* fault is handled */
  3015. ret = 0;
  3016. count_vm_event(THP_FILE_MAPPED);
  3017. out:
  3018. spin_unlock(vmf->ptl);
  3019. return ret;
  3020. }
  3021. #else
  3022. static int do_set_pmd(struct vm_fault *vmf, struct page *page)
  3023. {
  3024. BUILD_BUG();
  3025. return 0;
  3026. }
  3027. #endif
  3028. /**
  3029. * alloc_set_pte - setup new PTE entry for given page and add reverse page
3030. * mapping. If needed, the function allocates a page table or uses a pre-allocated one.
  3031. *
  3032. * @vmf: fault environment
  3033. * @memcg: memcg to charge page (only for private mappings)
  3034. * @page: page to map
  3035. *
  3036. * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
  3037. * return.
  3038. *
3039. * Target users are the page handler itself and implementations of
  3040. * vm_ops->map_pages.
  3041. */
  3042. int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
  3043. struct page *page)
  3044. {
  3045. struct vm_area_struct *vma = vmf->vma;
  3046. bool write = vmf->flags & FAULT_FLAG_WRITE;
  3047. pte_t entry;
  3048. int ret;
  3049. if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
  3050. IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
  3051. /* THP on COW? */
  3052. VM_BUG_ON_PAGE(memcg, page);
  3053. ret = do_set_pmd(vmf, page);
  3054. if (ret != VM_FAULT_FALLBACK)
  3055. return ret;
  3056. }
  3057. if (!vmf->pte) {
  3058. ret = pte_alloc_one_map(vmf);
  3059. if (ret)
  3060. return ret;
  3061. }
  3062. /* Re-check under ptl */
  3063. if (unlikely(!pte_none(*vmf->pte)))
  3064. return VM_FAULT_NOPAGE;
  3065. flush_icache_page(vma, page);
  3066. entry = mk_pte(page, vma->vm_page_prot);
  3067. if (write)
  3068. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  3069. /* copy-on-write page */
  3070. if (write && !(vma->vm_flags & VM_SHARED)) {
  3071. inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
  3072. page_add_new_anon_rmap(page, vma, vmf->address, false);
  3073. mem_cgroup_commit_charge(page, memcg, false, false);
  3074. lru_cache_add_active_or_unevictable(page, vma);
  3075. } else {
  3076. inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
  3077. page_add_file_rmap(page, false);
  3078. }
  3079. set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
  3080. /* no need to invalidate: a not-present page won't be cached */
  3081. update_mmu_cache(vma, vmf->address, vmf->pte);
  3082. return 0;
  3083. }
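/*
 * Hypothetical sketch (not part of memory.c) of how a ->map_pages() style
 * caller would use alloc_set_pte() for a pagecache page that is already
 * uptodate: memcg is NULL because pagecache pages are charged elsewhere.
 * The helper name is an illustrative assumption.
 */
static inline void example_map_one_ready_page(struct vm_fault *vmf,
                                              struct page *page)
{
        if (alloc_set_pte(vmf, NULL, page))
                return; /* PTE already populated, or page table allocation failed */
        /* on success vmf->pte is mapped and locked; the caller unlocks vmf->ptl */
}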
  3084. /**
  3085. * finish_fault - finish page fault once we have prepared the page to fault
  3086. *
  3087. * @vmf: structure describing the fault
  3088. *
  3089. * This function handles all that is needed to finish a page fault once the
  3090. * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
  3091. * given page, adds reverse page mapping, handles memcg charges and LRU
  3092. * addition. The function returns 0 on success, VM_FAULT_ code in case of
  3093. * error.
  3094. *
  3095. * The function expects the page to be locked and on success it consumes a
3096. * reference on the page being mapped (for the PTE which maps it).
  3097. */
  3098. int finish_fault(struct vm_fault *vmf)
  3099. {
  3100. struct page *page;
  3101. int ret = 0;
  3102. /* Did we COW the page? */
  3103. if ((vmf->flags & FAULT_FLAG_WRITE) &&
  3104. !(vmf->vma->vm_flags & VM_SHARED))
  3105. page = vmf->cow_page;
  3106. else
  3107. page = vmf->page;
  3108. /*
  3109. * check even for read faults because we might have lost our CoWed
  3110. * page
  3111. */
  3112. if (!(vmf->vma->vm_flags & VM_SHARED))
  3113. ret = check_stable_address_space(vmf->vma->vm_mm);
  3114. if (!ret)
  3115. ret = alloc_set_pte(vmf, vmf->memcg, page);
  3116. if (vmf->pte)
  3117. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3118. return ret;
  3119. }
  3120. static unsigned long fault_around_bytes __read_mostly =
  3121. rounddown_pow_of_two(65536);
  3122. #ifdef CONFIG_DEBUG_FS
  3123. static int fault_around_bytes_get(void *data, u64 *val)
  3124. {
  3125. *val = fault_around_bytes;
  3126. return 0;
  3127. }
  3128. /*
  3129. * fault_around_bytes must be rounded down to the nearest page order as it's
  3130. * what do_fault_around() expects to see.
  3131. */
  3132. static int fault_around_bytes_set(void *data, u64 val)
  3133. {
  3134. if (val / PAGE_SIZE > PTRS_PER_PTE)
  3135. return -EINVAL;
  3136. if (val > PAGE_SIZE)
  3137. fault_around_bytes = rounddown_pow_of_two(val);
  3138. else
  3139. fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
  3140. return 0;
  3141. }
  3142. DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
  3143. fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
  3144. static int __init fault_around_debugfs(void)
  3145. {
  3146. void *ret;
  3147. ret = debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
  3148. &fault_around_bytes_fops);
  3149. if (!ret)
  3150. pr_warn("Failed to create fault_around_bytes in debugfs");
  3151. return 0;
  3152. }
  3153. late_initcall(fault_around_debugfs);
  3154. #endif
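/*
 * Tuning note (illustrative): with 4K pages, writing 50000 to
 * /sys/kernel/debug/fault_around_bytes stores rounddown_pow_of_two(50000)
 * == 32768, i.e. 8 pages of fault-around; values of PAGE_SIZE or less are
 * clamped to PAGE_SIZE, and values covering more than PTRS_PER_PTE pages
 * are rejected with -EINVAL.
 */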
  3155. /*
3156. * do_fault_around() tries to map a few pages around the fault address. The hope
  3157. * is that the pages will be needed soon and this will lower the number of
  3158. * faults to handle.
  3159. *
  3160. * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
  3161. * not ready to be mapped: not up-to-date, locked, etc.
  3162. *
  3163. * This function is called with the page table lock taken. In the split ptlock
3164. * case the page table lock protects only those entries which belong to
  3165. * the page table corresponding to the fault address.
  3166. *
  3167. * This function doesn't cross the VMA boundaries, in order to call map_pages()
  3168. * only once.
  3169. *
  3170. * fault_around_bytes defines how many bytes we'll try to map.
  3171. * do_fault_around() expects it to be set to a power of two less than or equal
  3172. * to PTRS_PER_PTE.
  3173. *
  3174. * The virtual address of the area that we map is naturally aligned to
  3175. * fault_around_bytes rounded down to the machine page size
  3176. * (and therefore to page order). This way it's easier to guarantee
  3177. * that we don't cross page table boundaries.
  3178. */
  3179. static int do_fault_around(struct vm_fault *vmf)
  3180. {
  3181. unsigned long address = vmf->address, nr_pages, mask;
  3182. pgoff_t start_pgoff = vmf->pgoff;
  3183. pgoff_t end_pgoff;
  3184. int off, ret = 0;
  3185. nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
  3186. mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
  3187. vmf->address = max(address & mask, vmf->vma->vm_start);
  3188. off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
  3189. start_pgoff -= off;
  3190. /*
  3191. * end_pgoff is either the end of the page table, the end of
3192. * the vma or nr_pages from start_pgoff, depending on what is nearest.
  3193. */
  3194. end_pgoff = start_pgoff -
  3195. ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
  3196. PTRS_PER_PTE - 1;
  3197. end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
  3198. start_pgoff + nr_pages - 1);
  3199. if (pmd_none(*vmf->pmd)) {
  3200. vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
  3201. vmf->address);
  3202. if (!vmf->prealloc_pte)
  3203. goto out;
  3204. smp_wmb(); /* See comment in __pte_alloc() */
  3205. }
  3206. vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
  3207. /* Huge page is mapped? Page fault is solved */
  3208. if (pmd_trans_huge(*vmf->pmd)) {
  3209. ret = VM_FAULT_NOPAGE;
  3210. goto out;
  3211. }
3212. /* ->map_pages() hasn't done anything useful. Cold page cache? */
  3213. if (!vmf->pte)
  3214. goto out;
  3215. /* check if the page fault is solved */
  3216. vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
  3217. if (!pte_none(*vmf->pte))
  3218. ret = VM_FAULT_NOPAGE;
  3219. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3220. out:
  3221. vmf->address = address;
  3222. vmf->pte = NULL;
  3223. return ret;
  3224. }
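/*
 * Worked example (illustrative): with 4K pages, PTRS_PER_PTE == 512 and the
 * default fault_around_bytes of 65536, nr_pages is 16 and mask is ~0xffffUL.
 * A fault at 0x7f1234567890 gives vmf->address = 0x7f1234560000 (unless
 * clamped to vm_start) and off = 7, so start_pgoff moves back by 7 pages and
 * end_pgoff is capped by the end of the 2MB page table, the end of the VMA
 * and start_pgoff + 15, whichever is smallest.
 */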
  3225. static int do_read_fault(struct vm_fault *vmf)
  3226. {
  3227. struct vm_area_struct *vma = vmf->vma;
  3228. int ret = 0;
  3229. /*
  3230. * Let's call ->map_pages() first and use ->fault() as fallback
  3231. * if page by the offset is not ready to be mapped (cold cache or
  3232. * something).
  3233. */
  3234. if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
  3235. ret = do_fault_around(vmf);
  3236. if (ret)
  3237. return ret;
  3238. }
  3239. ret = __do_fault(vmf);
  3240. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  3241. return ret;
  3242. ret |= finish_fault(vmf);
  3243. unlock_page(vmf->page);
  3244. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  3245. put_page(vmf->page);
  3246. return ret;
  3247. }
  3248. static int do_cow_fault(struct vm_fault *vmf)
  3249. {
  3250. struct vm_area_struct *vma = vmf->vma;
  3251. int ret;
  3252. if (unlikely(anon_vma_prepare(vma)))
  3253. return VM_FAULT_OOM;
  3254. vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
  3255. if (!vmf->cow_page)
  3256. return VM_FAULT_OOM;
  3257. if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
  3258. &vmf->memcg, false)) {
  3259. put_page(vmf->cow_page);
  3260. return VM_FAULT_OOM;
  3261. }
  3262. ret = __do_fault(vmf);
  3263. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  3264. goto uncharge_out;
  3265. if (ret & VM_FAULT_DONE_COW)
  3266. return ret;
  3267. copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
  3268. __SetPageUptodate(vmf->cow_page);
  3269. ret |= finish_fault(vmf);
  3270. unlock_page(vmf->page);
  3271. put_page(vmf->page);
  3272. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  3273. goto uncharge_out;
  3274. return ret;
  3275. uncharge_out:
  3276. mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
  3277. put_page(vmf->cow_page);
  3278. return ret;
  3279. }
  3280. static int do_shared_fault(struct vm_fault *vmf)
  3281. {
  3282. struct vm_area_struct *vma = vmf->vma;
  3283. int ret, tmp;
  3284. ret = __do_fault(vmf);
  3285. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  3286. return ret;
  3287. /*
  3288. * Check if the backing address space wants to know that the page is
  3289. * about to become writable
  3290. */
  3291. if (vma->vm_ops->page_mkwrite) {
  3292. unlock_page(vmf->page);
  3293. tmp = do_page_mkwrite(vmf);
  3294. if (unlikely(!tmp ||
  3295. (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
  3296. put_page(vmf->page);
  3297. return tmp;
  3298. }
  3299. }
  3300. ret |= finish_fault(vmf);
  3301. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
  3302. VM_FAULT_RETRY))) {
  3303. unlock_page(vmf->page);
  3304. put_page(vmf->page);
  3305. return ret;
  3306. }
  3307. fault_dirty_shared_page(vma, vmf->page);
  3308. return ret;
  3309. }
  3310. /*
  3311. * We enter with non-exclusive mmap_sem (to exclude vma changes,
  3312. * but allow concurrent faults).
  3313. * The mmap_sem may have been released depending on flags and our
  3314. * return value. See filemap_fault() and __lock_page_or_retry().
  3315. */
  3316. static int do_fault(struct vm_fault *vmf)
  3317. {
  3318. struct vm_area_struct *vma = vmf->vma;
  3319. int ret;
  3320. /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
  3321. if (!vma->vm_ops->fault)
  3322. ret = VM_FAULT_SIGBUS;
  3323. else if (!(vmf->flags & FAULT_FLAG_WRITE))
  3324. ret = do_read_fault(vmf);
  3325. else if (!(vma->vm_flags & VM_SHARED))
  3326. ret = do_cow_fault(vmf);
  3327. else
  3328. ret = do_shared_fault(vmf);
  3329. /* preallocated pagetable is unused: free it */
  3330. if (vmf->prealloc_pte) {
  3331. pte_free(vma->vm_mm, vmf->prealloc_pte);
  3332. vmf->prealloc_pte = NULL;
  3333. }
  3334. return ret;
  3335. }
  3336. static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
  3337. unsigned long addr, int page_nid,
  3338. int *flags)
  3339. {
  3340. get_page(page);
  3341. count_vm_numa_event(NUMA_HINT_FAULTS);
  3342. if (page_nid == numa_node_id()) {
  3343. count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
  3344. *flags |= TNF_FAULT_LOCAL;
  3345. }
  3346. return mpol_misplaced(page, vma, addr);
  3347. }
  3348. static int do_numa_page(struct vm_fault *vmf)
  3349. {
  3350. struct vm_area_struct *vma = vmf->vma;
  3351. struct page *page = NULL;
  3352. int page_nid = -1;
  3353. int last_cpupid;
  3354. int target_nid;
  3355. bool migrated = false;
  3356. pte_t pte;
  3357. bool was_writable = pte_savedwrite(vmf->orig_pte);
  3358. int flags = 0;
  3359. /*
  3360. * The "pte" at this point cannot be used safely without
  3361. * validation through pte_unmap_same(). It's of NUMA type but
3362. * the pfn may be bogus if the read is not atomic.
  3363. */
  3364. vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
  3365. spin_lock(vmf->ptl);
  3366. if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
  3367. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3368. goto out;
  3369. }
  3370. /*
3371. * Make it present again. Depending on how the arch implements
3372. * non-accessible ptes, some can allow access by kernel mode.
  3373. */
  3374. pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte);
  3375. pte = pte_modify(pte, vma->vm_page_prot);
  3376. pte = pte_mkyoung(pte);
  3377. if (was_writable)
  3378. pte = pte_mkwrite(pte);
  3379. ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte);
  3380. update_mmu_cache(vma, vmf->address, vmf->pte);
  3381. page = vm_normal_page(vma, vmf->address, pte);
  3382. if (!page) {
  3383. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3384. return 0;
  3385. }
  3386. /* TODO: handle PTE-mapped THP */
  3387. if (PageCompound(page)) {
  3388. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3389. return 0;
  3390. }
  3391. /*
  3392. * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
  3393. * much anyway since they can be in shared cache state. This misses
  3394. * the case where a mapping is writable but the process never writes
  3395. * to it but pte_write gets cleared during protection updates and
  3396. * pte_dirty has unpredictable behaviour between PTE scan updates,
  3397. * background writeback, dirty balancing and application behaviour.
  3398. */
  3399. if (!pte_write(pte))
  3400. flags |= TNF_NO_GROUP;
  3401. /*
  3402. * Flag if the page is shared between multiple address spaces. This
  3403. * is later used when determining whether to group tasks together
  3404. */
  3405. if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
  3406. flags |= TNF_SHARED;
  3407. last_cpupid = page_cpupid_last(page);
  3408. page_nid = page_to_nid(page);
  3409. target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
  3410. &flags);
  3411. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3412. if (target_nid == -1) {
  3413. put_page(page);
  3414. goto out;
  3415. }
  3416. /* Migrate to the requested node */
  3417. migrated = migrate_misplaced_page(page, vma, target_nid);
  3418. if (migrated) {
  3419. page_nid = target_nid;
  3420. flags |= TNF_MIGRATED;
  3421. } else
  3422. flags |= TNF_MIGRATE_FAIL;
  3423. out:
  3424. if (page_nid != -1)
  3425. task_numa_fault(last_cpupid, page_nid, 1, flags);
  3426. return 0;
  3427. }
  3428. static inline int create_huge_pmd(struct vm_fault *vmf)
  3429. {
  3430. if (vma_is_anonymous(vmf->vma))
  3431. return do_huge_pmd_anonymous_page(vmf);
  3432. if (vmf->vma->vm_ops->huge_fault)
  3433. return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
  3434. return VM_FAULT_FALLBACK;
  3435. }
  3436. /* `inline' is required to avoid gcc 4.1.2 build error */
  3437. static inline int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
  3438. {
  3439. if (vma_is_anonymous(vmf->vma))
  3440. return do_huge_pmd_wp_page(vmf, orig_pmd);
  3441. if (vmf->vma->vm_ops->huge_fault)
  3442. return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
  3443. /* COW handled on pte level: split pmd */
  3444. VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
  3445. __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
  3446. return VM_FAULT_FALLBACK;
  3447. }
  3448. static inline bool vma_is_accessible(struct vm_area_struct *vma)
  3449. {
  3450. return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
  3451. }
  3452. static int create_huge_pud(struct vm_fault *vmf)
  3453. {
  3454. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  3455. /* No support for anonymous transparent PUD pages yet */
  3456. if (vma_is_anonymous(vmf->vma))
  3457. return VM_FAULT_FALLBACK;
  3458. if (vmf->vma->vm_ops->huge_fault)
  3459. return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
  3460. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  3461. return VM_FAULT_FALLBACK;
  3462. }
  3463. static int wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
  3464. {
  3465. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  3466. /* No support for anonymous transparent PUD pages yet */
  3467. if (vma_is_anonymous(vmf->vma))
  3468. return VM_FAULT_FALLBACK;
  3469. if (vmf->vma->vm_ops->huge_fault)
  3470. return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
  3471. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  3472. return VM_FAULT_FALLBACK;
  3473. }
  3474. /*
  3475. * These routines also need to handle stuff like marking pages dirty
  3476. * and/or accessed for architectures that don't do it in hardware (most
  3477. * RISC architectures). The early dirtying is also good on the i386.
  3478. *
  3479. * There is also a hook called "update_mmu_cache()" that architectures
  3480. * with external mmu caches can use to update those (ie the Sparc or
  3481. * PowerPC hashed page tables that act as extended TLBs).
  3482. *
  3483. * We enter with non-exclusive mmap_sem (to exclude vma changes, but allow
  3484. * concurrent faults).
  3485. *
  3486. * The mmap_sem may have been released depending on flags and our return value.
  3487. * See filemap_fault() and __lock_page_or_retry().
  3488. */
  3489. static int handle_pte_fault(struct vm_fault *vmf)
  3490. {
  3491. pte_t entry;
  3492. if (unlikely(pmd_none(*vmf->pmd))) {
  3493. /*
  3494. * Leave __pte_alloc() until later: because vm_ops->fault may
  3495. * want to allocate huge page, and if we expose page table
  3496. * for an instant, it will be difficult to retract from
  3497. * concurrent faults and from rmap lookups.
  3498. */
  3499. vmf->pte = NULL;
  3500. } else {
  3501. /* See comment in pte_alloc_one_map() */
  3502. if (pmd_devmap_trans_unstable(vmf->pmd))
  3503. return 0;
  3504. /*
  3505. * A regular pmd is established and it can't morph into a huge
  3506. * pmd from under us anymore at this point because we hold the
  3507. * mmap_sem read mode and khugepaged takes it in write mode.
  3508. * So now it's safe to run pte_offset_map().
  3509. */
  3510. vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
  3511. vmf->orig_pte = *vmf->pte;
  3512. /*
  3513. * some architectures can have larger ptes than wordsize,
3514. * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
  3515. * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
  3516. * accesses. The code below just needs a consistent view
  3517. * for the ifs and we later double check anyway with the
  3518. * ptl lock held. So here a barrier will do.
  3519. */
  3520. barrier();
  3521. if (pte_none(vmf->orig_pte)) {
  3522. pte_unmap(vmf->pte);
  3523. vmf->pte = NULL;
  3524. }
  3525. }
  3526. if (!vmf->pte) {
  3527. if (vma_is_anonymous(vmf->vma))
  3528. return do_anonymous_page(vmf);
  3529. else
  3530. return do_fault(vmf);
  3531. }
  3532. if (!pte_present(vmf->orig_pte))
  3533. return do_swap_page(vmf);
  3534. if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
  3535. return do_numa_page(vmf);
  3536. vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
  3537. spin_lock(vmf->ptl);
  3538. entry = vmf->orig_pte;
  3539. if (unlikely(!pte_same(*vmf->pte, entry)))
  3540. goto unlock;
  3541. if (vmf->flags & FAULT_FLAG_WRITE) {
  3542. if (!pte_write(entry))
  3543. return do_wp_page(vmf);
  3544. entry = pte_mkdirty(entry);
  3545. }
  3546. entry = pte_mkyoung(entry);
  3547. if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
  3548. vmf->flags & FAULT_FLAG_WRITE)) {
  3549. update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
  3550. } else {
  3551. /*
  3552. * This is needed only for protection faults but the arch code
  3553. * is not yet telling us if this is a protection fault or not.
  3554. * This still avoids useless tlb flushes for .text page faults
  3555. * with threads.
  3556. */
  3557. if (vmf->flags & FAULT_FLAG_WRITE)
  3558. flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
  3559. }
  3560. unlock:
  3561. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3562. return 0;
  3563. }
  3564. /*
  3565. * By the time we get here, we already hold the mm semaphore
  3566. *
  3567. * The mmap_sem may have been released depending on flags and our
  3568. * return value. See filemap_fault() and __lock_page_or_retry().
  3569. */
  3570. static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
  3571. unsigned int flags)
  3572. {
  3573. struct vm_fault vmf = {
  3574. .vma = vma,
  3575. .address = address & PAGE_MASK,
  3576. .flags = flags,
  3577. .pgoff = linear_page_index(vma, address),
  3578. .gfp_mask = __get_fault_gfp_mask(vma),
  3579. };
  3580. unsigned int dirty = flags & FAULT_FLAG_WRITE;
  3581. struct mm_struct *mm = vma->vm_mm;
  3582. pgd_t *pgd;
  3583. p4d_t *p4d;
  3584. int ret;
  3585. pgd = pgd_offset(mm, address);
  3586. p4d = p4d_alloc(mm, pgd, address);
  3587. if (!p4d)
  3588. return VM_FAULT_OOM;
  3589. vmf.pud = pud_alloc(mm, p4d, address);
  3590. if (!vmf.pud)
  3591. return VM_FAULT_OOM;
  3592. if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
  3593. ret = create_huge_pud(&vmf);
  3594. if (!(ret & VM_FAULT_FALLBACK))
  3595. return ret;
  3596. } else {
  3597. pud_t orig_pud = *vmf.pud;
  3598. barrier();
  3599. if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
  3600. /* NUMA case for anonymous PUDs would go here */
  3601. if (dirty && !pud_write(orig_pud)) {
  3602. ret = wp_huge_pud(&vmf, orig_pud);
  3603. if (!(ret & VM_FAULT_FALLBACK))
  3604. return ret;
  3605. } else {
  3606. huge_pud_set_accessed(&vmf, orig_pud);
  3607. return 0;
  3608. }
  3609. }
  3610. }
  3611. vmf.pmd = pmd_alloc(mm, vmf.pud, address);
  3612. if (!vmf.pmd)
  3613. return VM_FAULT_OOM;
  3614. if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
  3615. ret = create_huge_pmd(&vmf);
  3616. if (!(ret & VM_FAULT_FALLBACK))
  3617. return ret;
  3618. } else {
  3619. pmd_t orig_pmd = *vmf.pmd;
  3620. barrier();
  3621. if (unlikely(is_swap_pmd(orig_pmd))) {
  3622. VM_BUG_ON(thp_migration_supported() &&
  3623. !is_pmd_migration_entry(orig_pmd));
  3624. if (is_pmd_migration_entry(orig_pmd))
  3625. pmd_migration_entry_wait(mm, vmf.pmd);
  3626. return 0;
  3627. }
  3628. if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
  3629. if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
  3630. return do_huge_pmd_numa_page(&vmf, orig_pmd);
  3631. if (dirty && !pmd_write(orig_pmd)) {
  3632. ret = wp_huge_pmd(&vmf, orig_pmd);
  3633. if (!(ret & VM_FAULT_FALLBACK))
  3634. return ret;
  3635. } else {
  3636. huge_pmd_set_accessed(&vmf, orig_pmd);
  3637. return 0;
  3638. }
  3639. }
  3640. }
  3641. return handle_pte_fault(&vmf);
  3642. }
  3643. /*
  3644. * By the time we get here, we already hold the mm semaphore
  3645. *
  3646. * The mmap_sem may have been released depending on flags and our
  3647. * return value. See filemap_fault() and __lock_page_or_retry().
  3648. */
  3649. int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
  3650. unsigned int flags)
  3651. {
  3652. int ret;
  3653. __set_current_state(TASK_RUNNING);
  3654. count_vm_event(PGFAULT);
  3655. count_memcg_event_mm(vma->vm_mm, PGFAULT);
  3656. /* do counter updates before entering really critical section. */
  3657. check_sync_rss_stat(current);
  3658. if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
  3659. flags & FAULT_FLAG_INSTRUCTION,
  3660. flags & FAULT_FLAG_REMOTE))
  3661. return VM_FAULT_SIGSEGV;
  3662. /*
  3663. * Enable the memcg OOM handling for faults triggered in user
  3664. * space. Kernel faults are handled more gracefully.
  3665. */
  3666. if (flags & FAULT_FLAG_USER)
  3667. mem_cgroup_oom_enable();
  3668. if (unlikely(is_vm_hugetlb_page(vma)))
  3669. ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
  3670. else
  3671. ret = __handle_mm_fault(vma, address, flags);
  3672. if (flags & FAULT_FLAG_USER) {
  3673. mem_cgroup_oom_disable();
  3674. /*
  3675. * The task may have entered a memcg OOM situation but
  3676. * if the allocation error was handled gracefully (no
  3677. * VM_FAULT_OOM), there is no need to kill anything.
  3678. * Just clean up the OOM state peacefully.
  3679. */
  3680. if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
  3681. mem_cgroup_oom_synchronize(false);
  3682. }
  3683. return ret;
  3684. }
  3685. EXPORT_SYMBOL_GPL(handle_mm_fault);
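/*
 * Hypothetical sketch (not part of memory.c) of the arch side of a user page
 * fault: look up the VMA under mmap_sem and hand off to handle_mm_fault().
 * FAULT_FLAG_ALLOW_RETRY, OOM and signal handling are deliberately omitted
 * so mmap_sem stays held across the call; the function name is an
 * illustrative assumption.
 */
static inline int example_arch_user_fault(struct mm_struct *mm,
                                          unsigned long address, bool write)
{
        struct vm_area_struct *vma;
        unsigned int flags = FAULT_FLAG_USER | (write ? FAULT_FLAG_WRITE : 0);
        int fault = VM_FAULT_SIGSEGV;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (vma && vma->vm_start <= address)
                fault = handle_mm_fault(vma, address, flags);
        up_read(&mm->mmap_sem);
        return fault;
}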
  3686. #ifndef __PAGETABLE_P4D_FOLDED
  3687. /*
  3688. * Allocate p4d page table.
  3689. * We've already handled the fast-path in-line.
  3690. */
  3691. int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
  3692. {
  3693. p4d_t *new = p4d_alloc_one(mm, address);
  3694. if (!new)
  3695. return -ENOMEM;
  3696. smp_wmb(); /* See comment in __pte_alloc */
  3697. spin_lock(&mm->page_table_lock);
  3698. if (pgd_present(*pgd)) /* Another has populated it */
  3699. p4d_free(mm, new);
  3700. else
  3701. pgd_populate(mm, pgd, new);
  3702. spin_unlock(&mm->page_table_lock);
  3703. return 0;
  3704. }
  3705. #endif /* __PAGETABLE_P4D_FOLDED */
  3706. #ifndef __PAGETABLE_PUD_FOLDED
  3707. /*
  3708. * Allocate page upper directory.
  3709. * We've already handled the fast-path in-line.
  3710. */
  3711. int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
  3712. {
  3713. pud_t *new = pud_alloc_one(mm, address);
  3714. if (!new)
  3715. return -ENOMEM;
  3716. smp_wmb(); /* See comment in __pte_alloc */
  3717. spin_lock(&mm->page_table_lock);
  3718. #ifndef __ARCH_HAS_5LEVEL_HACK
  3719. if (!p4d_present(*p4d)) {
  3720. mm_inc_nr_puds(mm);
  3721. p4d_populate(mm, p4d, new);
  3722. } else /* Another has populated it */
  3723. pud_free(mm, new);
  3724. #else
  3725. if (!pgd_present(*p4d)) {
  3726. mm_inc_nr_puds(mm);
  3727. pgd_populate(mm, p4d, new);
  3728. } else /* Another has populated it */
  3729. pud_free(mm, new);
  3730. #endif /* __ARCH_HAS_5LEVEL_HACK */
  3731. spin_unlock(&mm->page_table_lock);
  3732. return 0;
  3733. }
  3734. #endif /* __PAGETABLE_PUD_FOLDED */
  3735. #ifndef __PAGETABLE_PMD_FOLDED
  3736. /*
  3737. * Allocate page middle directory.
  3738. * We've already handled the fast-path in-line.
  3739. */
  3740. int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
  3741. {
  3742. spinlock_t *ptl;
  3743. pmd_t *new = pmd_alloc_one(mm, address);
  3744. if (!new)
  3745. return -ENOMEM;
  3746. smp_wmb(); /* See comment in __pte_alloc */
  3747. ptl = pud_lock(mm, pud);
  3748. #ifndef __ARCH_HAS_4LEVEL_HACK
  3749. if (!pud_present(*pud)) {
  3750. mm_inc_nr_pmds(mm);
  3751. pud_populate(mm, pud, new);
  3752. } else /* Another has populated it */
  3753. pmd_free(mm, new);
  3754. #else
  3755. if (!pgd_present(*pud)) {
  3756. mm_inc_nr_pmds(mm);
  3757. pgd_populate(mm, pud, new);
  3758. } else /* Another has populated it */
  3759. pmd_free(mm, new);
  3760. #endif /* __ARCH_HAS_4LEVEL_HACK */
  3761. spin_unlock(ptl);
  3762. return 0;
  3763. }
  3764. #endif /* __PAGETABLE_PMD_FOLDED */
  3765. static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
  3766. unsigned long *start, unsigned long *end,
  3767. pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
  3768. {
  3769. pgd_t *pgd;
  3770. p4d_t *p4d;
  3771. pud_t *pud;
  3772. pmd_t *pmd;
  3773. pte_t *ptep;
  3774. pgd = pgd_offset(mm, address);
  3775. if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
  3776. goto out;
  3777. p4d = p4d_offset(pgd, address);
  3778. if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
  3779. goto out;
  3780. pud = pud_offset(p4d, address);
  3781. if (pud_none(*pud) || unlikely(pud_bad(*pud)))
  3782. goto out;
  3783. pmd = pmd_offset(pud, address);
  3784. VM_BUG_ON(pmd_trans_huge(*pmd));
  3785. if (pmd_huge(*pmd)) {
  3786. if (!pmdpp)
  3787. goto out;
  3788. if (start && end) {
  3789. *start = address & PMD_MASK;
  3790. *end = *start + PMD_SIZE;
  3791. mmu_notifier_invalidate_range_start(mm, *start, *end);
  3792. }
  3793. *ptlp = pmd_lock(mm, pmd);
  3794. if (pmd_huge(*pmd)) {
  3795. *pmdpp = pmd;
  3796. return 0;
  3797. }
  3798. spin_unlock(*ptlp);
  3799. if (start && end)
  3800. mmu_notifier_invalidate_range_end(mm, *start, *end);
  3801. }
  3802. if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
  3803. goto out;
  3804. if (start && end) {
  3805. *start = address & PAGE_MASK;
  3806. *end = *start + PAGE_SIZE;
  3807. mmu_notifier_invalidate_range_start(mm, *start, *end);
  3808. }
  3809. ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
  3810. if (!pte_present(*ptep))
  3811. goto unlock;
  3812. *ptepp = ptep;
  3813. return 0;
  3814. unlock:
  3815. pte_unmap_unlock(ptep, *ptlp);
  3816. if (start && end)
  3817. mmu_notifier_invalidate_range_end(mm, *start, *end);
  3818. out:
  3819. return -EINVAL;
  3820. }
  3821. static inline int follow_pte(struct mm_struct *mm, unsigned long address,
  3822. pte_t **ptepp, spinlock_t **ptlp)
  3823. {
  3824. int res;
  3825. /* (void) is needed to make gcc happy */
  3826. (void) __cond_lock(*ptlp,
  3827. !(res = __follow_pte_pmd(mm, address, NULL, NULL,
  3828. ptepp, NULL, ptlp)));
  3829. return res;
  3830. }
  3831. int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
  3832. unsigned long *start, unsigned long *end,
  3833. pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
  3834. {
  3835. int res;
  3836. /* (void) is needed to make gcc happy */
  3837. (void) __cond_lock(*ptlp,
  3838. !(res = __follow_pte_pmd(mm, address, start, end,
  3839. ptepp, pmdpp, ptlp)));
  3840. return res;
  3841. }
  3842. EXPORT_SYMBOL(follow_pte_pmd);
  3843. /**
  3844. * follow_pfn - look up PFN at a user virtual address
  3845. * @vma: memory mapping
  3846. * @address: user virtual address
  3847. * @pfn: location to store found PFN
  3848. *
  3849. * Only IO mappings and raw PFN mappings are allowed.
  3850. *
  3851. * Returns zero and the pfn at @pfn on success, -ve otherwise.
  3852. */
  3853. int follow_pfn(struct vm_area_struct *vma, unsigned long address,
  3854. unsigned long *pfn)
  3855. {
  3856. int ret = -EINVAL;
  3857. spinlock_t *ptl;
  3858. pte_t *ptep;
  3859. if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
  3860. return ret;
  3861. ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
  3862. if (ret)
  3863. return ret;
  3864. *pfn = pte_pfn(*ptep);
  3865. pte_unmap_unlock(ptep, ptl);
  3866. return 0;
  3867. }
  3868. EXPORT_SYMBOL(follow_pfn);
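/*
 * Hypothetical usage sketch (not part of memory.c): a driver handed a user
 * virtual address inside one of its own VM_IO/VM_PFNMAP mappings can
 * translate it to a PFN like this; mmap_sem is taken to keep the VMA and its
 * page tables stable. The helper name is an illustrative assumption.
 */
static inline int example_user_addr_to_pfn(struct mm_struct *mm,
                                           unsigned long uaddr,
                                           unsigned long *pfn)
{
        struct vm_area_struct *vma;
        int ret = -EINVAL;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, uaddr);
        if (vma && vma->vm_start <= uaddr)
                ret = follow_pfn(vma, uaddr, pfn);
        up_read(&mm->mmap_sem);
        return ret;
}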
  3869. #ifdef CONFIG_HAVE_IOREMAP_PROT
  3870. int follow_phys(struct vm_area_struct *vma,
  3871. unsigned long address, unsigned int flags,
  3872. unsigned long *prot, resource_size_t *phys)
  3873. {
  3874. int ret = -EINVAL;
  3875. pte_t *ptep, pte;
  3876. spinlock_t *ptl;
  3877. if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
  3878. goto out;
  3879. if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
  3880. goto out;
  3881. pte = *ptep;
  3882. if ((flags & FOLL_WRITE) && !pte_write(pte))
  3883. goto unlock;
  3884. *prot = pgprot_val(pte_pgprot(pte));
  3885. *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
  3886. ret = 0;
  3887. unlock:
  3888. pte_unmap_unlock(ptep, ptl);
  3889. out:
  3890. return ret;
  3891. }
  3892. int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
  3893. void *buf, int len, int write)
  3894. {
  3895. resource_size_t phys_addr;
  3896. unsigned long prot = 0;
  3897. void __iomem *maddr;
  3898. int offset = addr & (PAGE_SIZE-1);
  3899. if (follow_phys(vma, addr, write, &prot, &phys_addr))
  3900. return -EINVAL;
  3901. maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
  3902. if (write)
  3903. memcpy_toio(maddr + offset, buf, len);
  3904. else
  3905. memcpy_fromio(buf, maddr + offset, len);
  3906. iounmap(maddr);
  3907. return len;
  3908. }
  3909. EXPORT_SYMBOL_GPL(generic_access_phys);
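/*
 * Hypothetical sketch (not part of memory.c): a driver that maps MMIO into
 * userspace with remap_pfn_range() can make the region visible to
 * ptrace/gdb by plugging generic_access_phys into its vm_operations_struct.
 * The variable name is an illustrative assumption.
 */
static const struct vm_operations_struct example_mmio_vm_ops __maybe_unused = {
        .access = generic_access_phys,
};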
  3910. #endif
  3911. /*
  3912. * Access another process' address space as given in mm. If non-NULL, use the
  3913. * given task for page fault accounting.
  3914. */
  3915. int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  3916. unsigned long addr, void *buf, int len, unsigned int gup_flags)
  3917. {
  3918. struct vm_area_struct *vma;
  3919. void *old_buf = buf;
  3920. int write = gup_flags & FOLL_WRITE;
  3921. down_read(&mm->mmap_sem);
  3922. /* ignore errors, just check how much was successfully transferred */
  3923. while (len) {
  3924. int bytes, ret, offset;
  3925. void *maddr;
  3926. struct page *page = NULL;
  3927. ret = get_user_pages_remote(tsk, mm, addr, 1,
  3928. gup_flags, &page, &vma, NULL);
  3929. if (ret <= 0) {
  3930. #ifndef CONFIG_HAVE_IOREMAP_PROT
  3931. break;
  3932. #else
  3933. /*
  3934. * Check if this is a VM_IO | VM_PFNMAP VMA, which
  3935. * we can access using slightly different code.
  3936. */
  3937. vma = find_vma(mm, addr);
  3938. if (!vma || vma->vm_start > addr)
  3939. break;
  3940. if (vma->vm_ops && vma->vm_ops->access)
  3941. ret = vma->vm_ops->access(vma, addr, buf,
  3942. len, write);
  3943. if (ret <= 0)
  3944. break;
  3945. bytes = ret;
  3946. #endif
  3947. } else {
  3948. bytes = len;
  3949. offset = addr & (PAGE_SIZE-1);
  3950. if (bytes > PAGE_SIZE-offset)
  3951. bytes = PAGE_SIZE-offset;
  3952. maddr = kmap(page);
  3953. if (write) {
  3954. copy_to_user_page(vma, page, addr,
  3955. maddr + offset, buf, bytes);
  3956. set_page_dirty_lock(page);
  3957. } else {
  3958. copy_from_user_page(vma, page, addr,
  3959. buf, maddr + offset, bytes);
  3960. }
  3961. kunmap(page);
  3962. put_page(page);
  3963. }
  3964. len -= bytes;
  3965. buf += bytes;
  3966. addr += bytes;
  3967. }
  3968. up_read(&mm->mmap_sem);
  3969. return buf - old_buf;
  3970. }
  3971. /**
  3972. * access_remote_vm - access another process' address space
  3973. * @mm: the mm_struct of the target address space
  3974. * @addr: start address to access
  3975. * @buf: source or destination buffer
  3976. * @len: number of bytes to transfer
  3977. * @gup_flags: flags modifying lookup behaviour
  3978. *
  3979. * The caller must hold a reference on @mm.
  3980. */
  3981. int access_remote_vm(struct mm_struct *mm, unsigned long addr,
  3982. void *buf, int len, unsigned int gup_flags)
  3983. {
  3984. return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
  3985. }
  3986. /*
  3987. * Access another process' address space.
3988. * Source/target buffer must be in kernel space.
3989. * Do not walk the page tables directly; use get_user_pages().
  3990. */
  3991. int access_process_vm(struct task_struct *tsk, unsigned long addr,
  3992. void *buf, int len, unsigned int gup_flags)
  3993. {
  3994. struct mm_struct *mm;
  3995. int ret;
  3996. mm = get_task_mm(tsk);
  3997. if (!mm)
  3998. return 0;
  3999. ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
  4000. mmput(mm);
  4001. return ret;
  4002. }
  4003. EXPORT_SYMBOL_GPL(access_process_vm);
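/*
 * Hypothetical sketch (not part of memory.c): reading one word from another
 * task's address space in the style of ptrace(PTRACE_PEEKDATA). Error
 * handling is simplified and the function name is an illustrative
 * assumption.
 */
static inline int example_peek_word(struct task_struct *child,
                                    unsigned long addr, unsigned long *word)
{
        if (access_process_vm(child, addr, word, sizeof(*word),
                              FOLL_FORCE) != sizeof(*word))
                return -EIO;
        return 0;
}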
  4004. /*
  4005. * Print the name of a VMA.
  4006. */
  4007. void print_vma_addr(char *prefix, unsigned long ip)
  4008. {
  4009. struct mm_struct *mm = current->mm;
  4010. struct vm_area_struct *vma;
  4011. /*
  4012. * we might be running from an atomic context so we cannot sleep
  4013. */
  4014. if (!down_read_trylock(&mm->mmap_sem))
  4015. return;
  4016. vma = find_vma(mm, ip);
  4017. if (vma && vma->vm_file) {
  4018. struct file *f = vma->vm_file;
  4019. char *buf = (char *)__get_free_page(GFP_NOWAIT);
  4020. if (buf) {
  4021. char *p;
  4022. p = file_path(f, buf, PAGE_SIZE);
  4023. if (IS_ERR(p))
  4024. p = "?";
  4025. printk("%s%s[%lx+%lx]", prefix, kbasename(p),
  4026. vma->vm_start,
  4027. vma->vm_end - vma->vm_start);
  4028. free_page((unsigned long)buf);
  4029. }
  4030. }
  4031. up_read(&mm->mmap_sem);
  4032. }
  4033. #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
  4034. void __might_fault(const char *file, int line)
  4035. {
  4036. /*
  4037. * Some code (nfs/sunrpc) uses socket ops on kernel memory while
  4038. * holding the mmap_sem, this is safe because kernel memory doesn't
  4039. * get paged out, therefore we'll never actually fault, and the
  4040. * below annotations will generate false positives.
  4041. */
  4042. if (uaccess_kernel())
  4043. return;
  4044. if (pagefault_disabled())
  4045. return;
  4046. __might_sleep(file, line, 0);
  4047. #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
  4048. if (current->mm)
  4049. might_lock_read(&current->mm->mmap_sem);
  4050. #endif
  4051. }
  4052. EXPORT_SYMBOL(__might_fault);
  4053. #endif
  4054. #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
  4055. static void clear_gigantic_page(struct page *page,
  4056. unsigned long addr,
  4057. unsigned int pages_per_huge_page)
  4058. {
  4059. int i;
  4060. struct page *p = page;
  4061. might_sleep();
  4062. for (i = 0; i < pages_per_huge_page;
  4063. i++, p = mem_map_next(p, page, i)) {
  4064. cond_resched();
  4065. clear_user_highpage(p, addr + i * PAGE_SIZE);
  4066. }
  4067. }
  4068. void clear_huge_page(struct page *page,
  4069. unsigned long addr_hint, unsigned int pages_per_huge_page)
  4070. {
  4071. int i, n, base, l;
  4072. unsigned long addr = addr_hint &
  4073. ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
  4074. if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
  4075. clear_gigantic_page(page, addr, pages_per_huge_page);
  4076. return;
  4077. }
  4078. /* Clear sub-page to access last to keep its cache lines hot */
  4079. might_sleep();
  4080. n = (addr_hint - addr) / PAGE_SIZE;
  4081. if (2 * n <= pages_per_huge_page) {
  4082. /* If sub-page to access in first half of huge page */
  4083. base = 0;
  4084. l = n;
  4085. /* Clear sub-pages at the end of huge page */
  4086. for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
  4087. cond_resched();
  4088. clear_user_highpage(page + i, addr + i * PAGE_SIZE);
  4089. }
  4090. } else {
  4091. /* If sub-page to access in second half of huge page */
  4092. base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
  4093. l = pages_per_huge_page - n;
4094. /* Clear sub-pages at the beginning of the huge page */
  4095. for (i = 0; i < base; i++) {
  4096. cond_resched();
  4097. clear_user_highpage(page + i, addr + i * PAGE_SIZE);
  4098. }
  4099. }
  4100. /*
  4101. * Clear remaining sub-pages in left-right-left-right pattern
  4102. * towards the sub-page to access
  4103. */
  4104. for (i = 0; i < l; i++) {
  4105. int left_idx = base + i;
  4106. int right_idx = base + 2 * l - 1 - i;
  4107. cond_resched();
  4108. clear_user_highpage(page + left_idx,
  4109. addr + left_idx * PAGE_SIZE);
  4110. cond_resched();
  4111. clear_user_highpage(page + right_idx,
  4112. addr + right_idx * PAGE_SIZE);
  4113. }
  4114. }
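/*
 * Worked example (illustrative): for pages_per_huge_page == 8 with the
 * faulting address in sub-page 2, n == 2, base == 0 and l == 2. Sub-pages
 * 7, 6, 5 and 4 are cleared first, then the left-right loop clears 0, 3, 1
 * and finally 2, leaving the sub-page the process is about to touch with
 * the hottest cache lines.
 */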
  4115. static void copy_user_gigantic_page(struct page *dst, struct page *src,
  4116. unsigned long addr,
  4117. struct vm_area_struct *vma,
  4118. unsigned int pages_per_huge_page)
  4119. {
  4120. int i;
  4121. struct page *dst_base = dst;
  4122. struct page *src_base = src;
  4123. for (i = 0; i < pages_per_huge_page; ) {
  4124. cond_resched();
  4125. copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
  4126. i++;
  4127. dst = mem_map_next(dst, dst_base, i);
  4128. src = mem_map_next(src, src_base, i);
  4129. }
  4130. }
  4131. void copy_user_huge_page(struct page *dst, struct page *src,
  4132. unsigned long addr, struct vm_area_struct *vma,
  4133. unsigned int pages_per_huge_page)
  4134. {
  4135. int i;
  4136. if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
  4137. copy_user_gigantic_page(dst, src, addr, vma,
  4138. pages_per_huge_page);
  4139. return;
  4140. }
  4141. might_sleep();
  4142. for (i = 0; i < pages_per_huge_page; i++) {
  4143. cond_resched();
  4144. copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
  4145. }
  4146. }
  4147. long copy_huge_page_from_user(struct page *dst_page,
  4148. const void __user *usr_src,
  4149. unsigned int pages_per_huge_page,
  4150. bool allow_pagefault)
  4151. {
  4152. void *src = (void *)usr_src;
  4153. void *page_kaddr;
  4154. unsigned long i, rc = 0;
  4155. unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
  4156. for (i = 0; i < pages_per_huge_page; i++) {
  4157. if (allow_pagefault)
  4158. page_kaddr = kmap(dst_page + i);
  4159. else
  4160. page_kaddr = kmap_atomic(dst_page + i);
  4161. rc = copy_from_user(page_kaddr,
  4162. (const void __user *)(src + i * PAGE_SIZE),
  4163. PAGE_SIZE);
  4164. if (allow_pagefault)
  4165. kunmap(dst_page + i);
  4166. else
  4167. kunmap_atomic(page_kaddr);
  4168. ret_val -= (PAGE_SIZE - rc);
  4169. if (rc)
  4170. break;
  4171. cond_resched();
  4172. }
  4173. return ret_val;
  4174. }
  4175. #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
  4176. #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
  4177. static struct kmem_cache *page_ptl_cachep;
  4178. void __init ptlock_cache_init(void)
  4179. {
  4180. page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
  4181. SLAB_PANIC, NULL);
  4182. }
  4183. bool ptlock_alloc(struct page *page)
  4184. {
  4185. spinlock_t *ptl;
  4186. ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
  4187. if (!ptl)
  4188. return false;
  4189. page->ptl = ptl;
  4190. return true;
  4191. }
  4192. void ptlock_free(struct page *page)
  4193. {
  4194. kmem_cache_free(page_ptl_cachep, page->ptl);
  4195. }
  4196. #endif