  1. /*
  2. * linux/mm/memory.c
  3. *
  4. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  5. */
  6. /*
  7. * demand-loading started 01.12.91 - seems it is high on the list of
  8. * things wanted, and it should be easy to implement. - Linus
  9. */
  10. /*
  11. * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
  12. * pages started 02.12.91, seems to work. - Linus.
  13. *
  14. * Tested sharing by executing about 30 /bin/sh: under the old kernel it
  15. * would have taken more than the 6M I have free, but it worked well as
  16. * far as I could see.
  17. *
  18. * Also corrected some "invalidate()"s - I wasn't doing enough of them.
  19. */
  20. /*
  21. * Real VM (paging to/from disk) started 18.12.91. Much more work and
  22. * thought has to go into this. Oh, well..
  23. * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
  24. * Found it. Everything seems to work now.
  25. * 20.12.91 - Ok, making the swap-device changeable like the root.
  26. */
  27. /*
  28. * 05.04.94 - Multi-page memory management added for v1.1.
  29. * Idea by Alex Bligh (alex@cconcepts.co.uk)
  30. *
  31. * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
  32. * (Gerhard.Wichert@pdb.siemens.de)
  33. *
  34. * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
  35. */
  36. #include <linux/kernel_stat.h>
  37. #include <linux/mm.h>
  38. #include <linux/sched/mm.h>
  39. #include <linux/sched/coredump.h>
  40. #include <linux/sched/numa_balancing.h>
  41. #include <linux/sched/task.h>
  42. #include <linux/hugetlb.h>
  43. #include <linux/mman.h>
  44. #include <linux/swap.h>
  45. #include <linux/highmem.h>
  46. #include <linux/pagemap.h>
  47. #include <linux/memremap.h>
  48. #include <linux/ksm.h>
  49. #include <linux/rmap.h>
  50. #include <linux/export.h>
  51. #include <linux/delayacct.h>
  52. #include <linux/init.h>
  53. #include <linux/pfn_t.h>
  54. #include <linux/writeback.h>
  55. #include <linux/memcontrol.h>
  56. #include <linux/mmu_notifier.h>
  57. #include <linux/swapops.h>
  58. #include <linux/elf.h>
  59. #include <linux/gfp.h>
  60. #include <linux/migrate.h>
  61. #include <linux/string.h>
  62. #include <linux/dma-debug.h>
  63. #include <linux/debugfs.h>
  64. #include <linux/userfaultfd_k.h>
  65. #include <linux/dax.h>
  66. #include <linux/oom.h>
  67. #include <asm/io.h>
  68. #include <asm/mmu_context.h>
  69. #include <asm/pgalloc.h>
  70. #include <linux/uaccess.h>
  71. #include <asm/tlb.h>
  72. #include <asm/tlbflush.h>
  73. #include <asm/pgtable.h>
  74. #include "internal.h"
  75. #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
  76. #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
  77. #endif
  78. #ifndef CONFIG_NEED_MULTIPLE_NODES
  79. /* use the per-pgdat data instead for discontigmem - mbligh */
  80. unsigned long max_mapnr;
  81. EXPORT_SYMBOL(max_mapnr);
  82. struct page *mem_map;
  83. EXPORT_SYMBOL(mem_map);
  84. #endif
  85. /*
  86. * A number of key systems in x86 including ioremap() rely on the assumption
  87. * that high_memory defines the upper bound on direct map memory, the end
  88. * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
  89. * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
  90. * and ZONE_HIGHMEM.
  91. */
  92. void *high_memory;
  93. EXPORT_SYMBOL(high_memory);
  94. /*
  95. * Randomize the address space (stacks, mmaps, brk, etc.).
  96. *
  97. * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
  98. * as ancient (libc5 based) binaries can segfault. )
  99. */
  100. int randomize_va_space __read_mostly =
  101. #ifdef CONFIG_COMPAT_BRK
  102. 1;
  103. #else
  104. 2;
  105. #endif
  106. static int __init disable_randmaps(char *s)
  107. {
  108. randomize_va_space = 0;
  109. return 1;
  110. }
  111. __setup("norandmaps", disable_randmaps);
  112. unsigned long zero_pfn __read_mostly;
  113. EXPORT_SYMBOL(zero_pfn);
  114. unsigned long highest_memmap_pfn __read_mostly;
  115. /*
  116. * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
  117. */
  118. static int __init init_zero_pfn(void)
  119. {
  120. zero_pfn = page_to_pfn(ZERO_PAGE(0));
  121. return 0;
  122. }
  123. core_initcall(init_zero_pfn);
  124. #if defined(SPLIT_RSS_COUNTING)
  125. void sync_mm_rss(struct mm_struct *mm)
  126. {
  127. int i;
  128. for (i = 0; i < NR_MM_COUNTERS; i++) {
  129. if (current->rss_stat.count[i]) {
  130. add_mm_counter(mm, i, current->rss_stat.count[i]);
  131. current->rss_stat.count[i] = 0;
  132. }
  133. }
  134. current->rss_stat.events = 0;
  135. }
  136. static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
  137. {
  138. struct task_struct *task = current;
  139. if (likely(task->mm == mm))
  140. task->rss_stat.count[member] += val;
  141. else
  142. add_mm_counter(mm, member, val);
  143. }
  144. #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
  145. #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
  146. /* sync counter once per 64 page faults */
  147. #define TASK_RSS_EVENTS_THRESH (64)
  148. static void check_sync_rss_stat(struct task_struct *task)
  149. {
  150. if (unlikely(task != current))
  151. return;
  152. if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
  153. sync_mm_rss(task->mm);
  154. }
  155. #else /* SPLIT_RSS_COUNTING */
  156. #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
  157. #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
  158. static void check_sync_rss_stat(struct task_struct *task)
  159. {
  160. }
  161. #endif /* SPLIT_RSS_COUNTING */
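/*
 * With SPLIT_RSS_COUNTING, the per-task deltas accumulated above are folded
 * back into the mm via check_sync_rss_stat(), which the page-fault path
 * (handle_mm_fault()) calls, so the mm-wide counters can lag behind by up to
 * TASK_RSS_EVENTS_THRESH updates per task.
 */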
  162. #ifdef HAVE_GENERIC_MMU_GATHER
  163. static bool tlb_next_batch(struct mmu_gather *tlb)
  164. {
  165. struct mmu_gather_batch *batch;
  166. batch = tlb->active;
  167. if (batch->next) {
  168. tlb->active = batch->next;
  169. return true;
  170. }
  171. if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
  172. return false;
  173. batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
  174. if (!batch)
  175. return false;
  176. tlb->batch_count++;
  177. batch->next = NULL;
  178. batch->nr = 0;
  179. batch->max = MAX_GATHER_BATCH;
  180. tlb->active->next = batch;
  181. tlb->active = batch;
  182. return true;
  183. }
  184. void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
  185. unsigned long start, unsigned long end)
  186. {
  187. tlb->mm = mm;
  188. /* Is it from 0 to ~0? */
  189. tlb->fullmm = !(start | (end+1));
  190. tlb->need_flush_all = 0;
  191. tlb->local.next = NULL;
  192. tlb->local.nr = 0;
  193. tlb->local.max = ARRAY_SIZE(tlb->__pages);
  194. tlb->active = &tlb->local;
  195. tlb->batch_count = 0;
  196. #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  197. tlb->batch = NULL;
  198. #endif
  199. tlb->page_size = 0;
  200. __tlb_reset_range(tlb);
  201. }
  202. static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
  203. {
  204. if (!tlb->end)
  205. return;
  206. tlb_flush(tlb);
  207. mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
  208. __tlb_reset_range(tlb);
  209. }
  210. static void tlb_flush_mmu_free(struct mmu_gather *tlb)
  211. {
  212. struct mmu_gather_batch *batch;
  213. #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  214. tlb_table_flush(tlb);
  215. #endif
  216. for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
  217. free_pages_and_swap_cache(batch->pages, batch->nr);
  218. batch->nr = 0;
  219. }
  220. tlb->active = &tlb->local;
  221. }
  222. void tlb_flush_mmu(struct mmu_gather *tlb)
  223. {
  224. tlb_flush_mmu_tlbonly(tlb);
  225. tlb_flush_mmu_free(tlb);
  226. }
  227. /* tlb_finish_mmu
  228. * Called at the end of the shootdown operation to free up any resources
  229. * that were required.
  230. */
  231. void arch_tlb_finish_mmu(struct mmu_gather *tlb,
  232. unsigned long start, unsigned long end, bool force)
  233. {
  234. struct mmu_gather_batch *batch, *next;
  235. if (force)
  236. __tlb_adjust_range(tlb, start, end - start);
  237. tlb_flush_mmu(tlb);
  238. /* keep the page table cache within bounds */
  239. check_pgt_cache();
  240. for (batch = tlb->local.next; batch; batch = next) {
  241. next = batch->next;
  242. free_pages((unsigned long)batch, 0);
  243. }
  244. tlb->local.next = NULL;
  245. }
  246. /* __tlb_remove_page
  247. * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
  248. * handling the additional races in SMP caused by other CPUs caching valid
  249. * mappings in their TLBs. Returns the number of free page slots left.
  250. * When out of page slots we must call tlb_flush_mmu().
  251. * Returns true if the caller should flush.
  252. */
  253. bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
  254. {
  255. struct mmu_gather_batch *batch;
  256. VM_BUG_ON(!tlb->end);
  257. VM_WARN_ON(tlb->page_size != page_size);
  258. batch = tlb->active;
  259. /*
  260. * Add the page and check if we are full. If so
  261. * force a flush.
  262. */
  263. batch->pages[batch->nr++] = page;
  264. if (batch->nr == batch->max) {
  265. if (!tlb_next_batch(tlb))
  266. return true;
  267. batch = tlb->active;
  268. }
  269. VM_BUG_ON_PAGE(batch->nr > batch->max, page);
  270. return false;
  271. }
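/*
 * Illustrative sketch (not part of this file): the generic
 * tlb_remove_page_size() wrapper in <asm-generic/tlb.h> is the usual caller
 * of the function above; it flushes as soon as a full batch is reported,
 * roughly like this (the example_ name is made up here):
 */
static inline void example_tlb_remove_page(struct mmu_gather *tlb,
					   struct page *page)
{
	if (__tlb_remove_page_size(tlb, page, PAGE_SIZE))
		tlb_flush_mmu(tlb);	/* batch full: flush and start a new one */
}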
  272. #endif /* HAVE_GENERIC_MMU_GATHER */
  273. #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  274. /*
  275. * See the comment near struct mmu_table_batch.
  276. */
  277. /*
  278. * If we want tlb_remove_table() to imply TLB invalidates.
  279. */
  280. static inline void tlb_table_invalidate(struct mmu_gather *tlb)
  281. {
  282. #ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
  283. /*
  284. * Invalidate page-table caches used by hardware walkers. Then we still
  285. * need to RCU-sched wait while freeing the pages because software
  286. * walkers can still be in-flight.
  287. */
  288. tlb_flush_mmu_tlbonly(tlb);
  289. #endif
  290. }
  291. static void tlb_remove_table_smp_sync(void *arg)
  292. {
  293. /* Simply deliver the interrupt */
  294. }
  295. static void tlb_remove_table_one(void *table)
  296. {
  297. /*
  298. * This isn't an RCU grace period and hence the page-tables cannot be
  299. * assumed to be actually RCU-freed.
  300. *
  301. * It is however sufficient for software page-table walkers that rely on
  302. * IRQ disabling. See the comment near struct mmu_table_batch.
  303. */
  304. smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
  305. __tlb_remove_table(table);
  306. }
  307. static void tlb_remove_table_rcu(struct rcu_head *head)
  308. {
  309. struct mmu_table_batch *batch;
  310. int i;
  311. batch = container_of(head, struct mmu_table_batch, rcu);
  312. for (i = 0; i < batch->nr; i++)
  313. __tlb_remove_table(batch->tables[i]);
  314. free_page((unsigned long)batch);
  315. }
  316. void tlb_table_flush(struct mmu_gather *tlb)
  317. {
  318. struct mmu_table_batch **batch = &tlb->batch;
  319. if (*batch) {
  320. tlb_table_invalidate(tlb);
  321. call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
  322. *batch = NULL;
  323. }
  324. }
  325. void tlb_remove_table(struct mmu_gather *tlb, void *table)
  326. {
  327. struct mmu_table_batch **batch = &tlb->batch;
  328. if (*batch == NULL) {
  329. *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
  330. if (*batch == NULL) {
  331. tlb_table_invalidate(tlb);
  332. tlb_remove_table_one(table);
  333. return;
  334. }
  335. (*batch)->nr = 0;
  336. }
  337. (*batch)->tables[(*batch)->nr++] = table;
  338. if ((*batch)->nr == MAX_TABLE_BATCH)
  339. tlb_table_flush(tlb);
  340. }
  341. #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
  342. /**
  343. * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
  344. * @tlb: the mmu_gather structure to initialize
  345. * @mm: the mm_struct of the target address space
  346. * @start: start of the region that will be removed from the page-table
  347. * @end: end of the region that will be removed from the page-table
  348. *
  349. * Called to initialize an (on-stack) mmu_gather structure for page-table
  350. * tear-down from @mm. The @start and @end are set to 0 and -1
  351. * respectively when @mm is without users and we're going to destroy
  352. * the full address space (exit/execve).
  353. */
  354. void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
  355. unsigned long start, unsigned long end)
  356. {
  357. arch_tlb_gather_mmu(tlb, mm, start, end);
  358. inc_tlb_flush_pending(tlb->mm);
  359. }
  360. void tlb_finish_mmu(struct mmu_gather *tlb,
  361. unsigned long start, unsigned long end)
  362. {
  363. /*
  364. * If parallel threads are doing PTE changes on the same range under a
  365. * non-exclusive lock (e.g., mmap_sem held for read) but defer the TLB
  366. * flush by batching, a thread that still has a stale TLB entry can fail
  367. * to flush it because it observes, for example, pte_none or !pte_dirty;
  368. * so flush the TLB forcefully if we detect parallel PTE batching threads.
  369. */
  370. bool force = mm_tlb_flush_nested(tlb->mm);
  371. arch_tlb_finish_mmu(tlb, start, end, force);
  372. dec_tlb_flush_pending(tlb->mm);
  373. }
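/*
 * Illustrative sketch (not part of this file): the typical calling sequence
 * around tlb_gather_mmu()/tlb_finish_mmu(), as used by teardown paths such
 * as unmap_region() and exit_mmap(); the example_ helper name and the
 * simplified floor/ceiling arguments are made up for illustration.
 */
static void example_unmap_range(struct mm_struct *mm,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* set up batching state */
	unmap_vmas(&tlb, vma, start, end);	/* clear ptes, queue pages + TLB range */
	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, start, end);	/* flush TLBs and free queued pages */
}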
  374. /*
  375. * Note: this doesn't free the actual pages themselves. That
  376. * has been handled earlier when unmapping all the memory regions.
  377. */
  378. static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
  379. unsigned long addr)
  380. {
  381. pgtable_t token = pmd_pgtable(*pmd);
  382. pmd_clear(pmd);
  383. pte_free_tlb(tlb, token, addr);
  384. mm_dec_nr_ptes(tlb->mm);
  385. }
  386. static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
  387. unsigned long addr, unsigned long end,
  388. unsigned long floor, unsigned long ceiling)
  389. {
  390. pmd_t *pmd;
  391. unsigned long next;
  392. unsigned long start;
  393. start = addr;
  394. pmd = pmd_offset(pud, addr);
  395. do {
  396. next = pmd_addr_end(addr, end);
  397. if (pmd_none_or_clear_bad(pmd))
  398. continue;
  399. free_pte_range(tlb, pmd, addr);
  400. } while (pmd++, addr = next, addr != end);
  401. start &= PUD_MASK;
  402. if (start < floor)
  403. return;
  404. if (ceiling) {
  405. ceiling &= PUD_MASK;
  406. if (!ceiling)
  407. return;
  408. }
  409. if (end - 1 > ceiling - 1)
  410. return;
  411. pmd = pmd_offset(pud, start);
  412. pud_clear(pud);
  413. pmd_free_tlb(tlb, pmd, start);
  414. mm_dec_nr_pmds(tlb->mm);
  415. }
  416. static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
  417. unsigned long addr, unsigned long end,
  418. unsigned long floor, unsigned long ceiling)
  419. {
  420. pud_t *pud;
  421. unsigned long next;
  422. unsigned long start;
  423. start = addr;
  424. pud = pud_offset(p4d, addr);
  425. do {
  426. next = pud_addr_end(addr, end);
  427. if (pud_none_or_clear_bad(pud))
  428. continue;
  429. free_pmd_range(tlb, pud, addr, next, floor, ceiling);
  430. } while (pud++, addr = next, addr != end);
  431. start &= P4D_MASK;
  432. if (start < floor)
  433. return;
  434. if (ceiling) {
  435. ceiling &= P4D_MASK;
  436. if (!ceiling)
  437. return;
  438. }
  439. if (end - 1 > ceiling - 1)
  440. return;
  441. pud = pud_offset(p4d, start);
  442. p4d_clear(p4d);
  443. pud_free_tlb(tlb, pud, start);
  444. mm_dec_nr_puds(tlb->mm);
  445. }
  446. static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
  447. unsigned long addr, unsigned long end,
  448. unsigned long floor, unsigned long ceiling)
  449. {
  450. p4d_t *p4d;
  451. unsigned long next;
  452. unsigned long start;
  453. start = addr;
  454. p4d = p4d_offset(pgd, addr);
  455. do {
  456. next = p4d_addr_end(addr, end);
  457. if (p4d_none_or_clear_bad(p4d))
  458. continue;
  459. free_pud_range(tlb, p4d, addr, next, floor, ceiling);
  460. } while (p4d++, addr = next, addr != end);
  461. start &= PGDIR_MASK;
  462. if (start < floor)
  463. return;
  464. if (ceiling) {
  465. ceiling &= PGDIR_MASK;
  466. if (!ceiling)
  467. return;
  468. }
  469. if (end - 1 > ceiling - 1)
  470. return;
  471. p4d = p4d_offset(pgd, start);
  472. pgd_clear(pgd);
  473. p4d_free_tlb(tlb, p4d, start);
  474. }
  475. /*
  476. * This function frees user-level page tables of a process.
  477. */
  478. void free_pgd_range(struct mmu_gather *tlb,
  479. unsigned long addr, unsigned long end,
  480. unsigned long floor, unsigned long ceiling)
  481. {
  482. pgd_t *pgd;
  483. unsigned long next;
  484. /*
  485. * The next few lines have given us lots of grief...
  486. *
  487. * Why are we testing PMD* at this top level? Because often
  488. * there will be no work to do at all, and we'd prefer not to
  489. * go all the way down to the bottom just to discover that.
  490. *
  491. * Why all these "- 1"s? Because 0 represents both the bottom
  492. * of the address space and the top of it (using -1 for the
  493. * top wouldn't help much: the masks would do the wrong thing).
  494. * The rule is that addr 0 and floor 0 refer to the bottom of
  495. * the address space, but end 0 and ceiling 0 refer to the top
  496. * Comparisons need to use "end - 1" and "ceiling - 1" (though
  497. * that end 0 case should be mythical).
  498. *
  499. * Wherever addr is brought up or ceiling brought down, we must
  500. * be careful to reject "the opposite 0" before it confuses the
  501. * subsequent tests. But what about where end is brought down
  502. * by PMD_SIZE below? no, end can't go down to 0 there.
  503. *
  504. * Whereas we round start (addr) and ceiling down, by different
  505. * masks at different levels, in order to test whether a table
  506. * now has no other vmas using it, so can be freed, we don't
  507. * bother to round floor or end up - the tests don't need that.
  508. */
  509. addr &= PMD_MASK;
  510. if (addr < floor) {
  511. addr += PMD_SIZE;
  512. if (!addr)
  513. return;
  514. }
  515. if (ceiling) {
  516. ceiling &= PMD_MASK;
  517. if (!ceiling)
  518. return;
  519. }
  520. if (end - 1 > ceiling - 1)
  521. end -= PMD_SIZE;
  522. if (addr > end - 1)
  523. return;
  524. /*
  525. * We add page table cache pages with PAGE_SIZE (see pte_free_tlb()),
  526. * so flush the tlb here if the gather was using a different page size.
  527. */
  528. tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
  529. pgd = pgd_offset(tlb->mm, addr);
  530. do {
  531. next = pgd_addr_end(addr, end);
  532. if (pgd_none_or_clear_bad(pgd))
  533. continue;
  534. free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
  535. } while (pgd++, addr = next, addr != end);
  536. }
  537. void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
  538. unsigned long floor, unsigned long ceiling)
  539. {
  540. while (vma) {
  541. struct vm_area_struct *next = vma->vm_next;
  542. unsigned long addr = vma->vm_start;
  543. /*
  544. * Hide vma from rmap and truncate_pagecache before freeing
  545. * pgtables
  546. */
  547. unlink_anon_vmas(vma);
  548. unlink_file_vma(vma);
  549. if (is_vm_hugetlb_page(vma)) {
  550. hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
  551. floor, next ? next->vm_start : ceiling);
  552. } else {
  553. /*
  554. * Optimization: gather nearby vmas into one call down
  555. */
  556. while (next && next->vm_start <= vma->vm_end + PMD_SIZE
  557. && !is_vm_hugetlb_page(next)) {
  558. vma = next;
  559. next = vma->vm_next;
  560. unlink_anon_vmas(vma);
  561. unlink_file_vma(vma);
  562. }
  563. free_pgd_range(tlb, addr, vma->vm_end,
  564. floor, next ? next->vm_start : ceiling);
  565. }
  566. vma = next;
  567. }
  568. }
  569. int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
  570. {
  571. spinlock_t *ptl;
  572. pgtable_t new = pte_alloc_one(mm, address);
  573. if (!new)
  574. return -ENOMEM;
  575. /*
  576. * Ensure all pte setup (eg. pte page lock and page clearing) is
  577. * visible before the pte is made visible to other CPUs by being
  578. * put into page tables.
  579. *
  580. * The other side of the story is the pointer chasing in the page
  581. * table walking code (when walking the page table without locking;
  582. * ie. most of the time). Fortunately, these data accesses consist
  583. * of a chain of data-dependent loads, meaning most CPUs (alpha
  584. * being the notable exception) will already guarantee loads are
  585. * seen in-order. See the alpha page table accessors for the
  586. * smp_read_barrier_depends() barriers in page table walking code.
  587. */
  588. smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
  589. ptl = pmd_lock(mm, pmd);
  590. if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
  591. mm_inc_nr_ptes(mm);
  592. pmd_populate(mm, pmd, new);
  593. new = NULL;
  594. }
  595. spin_unlock(ptl);
  596. if (new)
  597. pte_free(mm, new);
  598. return 0;
  599. }
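/*
 * Callers normally reach __pte_alloc() through the pte_alloc() and
 * pte_alloc_map_lock() helpers in <linux/mm.h>, which only take this
 * allocation path while the pmd is still empty, e.g.:
 *
 *	if (pte_alloc(mm, pmd, addr))
 *		return VM_FAULT_OOM;
 */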
  600. int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
  601. {
  602. pte_t *new = pte_alloc_one_kernel(&init_mm, address);
  603. if (!new)
  604. return -ENOMEM;
  605. smp_wmb(); /* See comment in __pte_alloc */
  606. spin_lock(&init_mm.page_table_lock);
  607. if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
  608. pmd_populate_kernel(&init_mm, pmd, new);
  609. new = NULL;
  610. }
  611. spin_unlock(&init_mm.page_table_lock);
  612. if (new)
  613. pte_free_kernel(&init_mm, new);
  614. return 0;
  615. }
  616. static inline void init_rss_vec(int *rss)
  617. {
  618. memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
  619. }
  620. static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
  621. {
  622. int i;
  623. if (current->mm == mm)
  624. sync_mm_rss(mm);
  625. for (i = 0; i < NR_MM_COUNTERS; i++)
  626. if (rss[i])
  627. add_mm_counter(mm, i, rss[i]);
  628. }
  629. /*
  630. * This function is called to print an error when a bad pte
  631. * is found. For example, we might have a PFN-mapped pte in
  632. * a region that doesn't allow it.
  633. *
  634. * The calling function must still handle the error.
  635. */
  636. static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
  637. pte_t pte, struct page *page)
  638. {
  639. pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
  640. p4d_t *p4d = p4d_offset(pgd, addr);
  641. pud_t *pud = pud_offset(p4d, addr);
  642. pmd_t *pmd = pmd_offset(pud, addr);
  643. struct address_space *mapping;
  644. pgoff_t index;
  645. static unsigned long resume;
  646. static unsigned long nr_shown;
  647. static unsigned long nr_unshown;
  648. /*
  649. * Allow a burst of 60 reports, then keep quiet for that minute;
  650. * or allow a steady drip of one report per second.
  651. */
  652. if (nr_shown == 60) {
  653. if (time_before(jiffies, resume)) {
  654. nr_unshown++;
  655. return;
  656. }
  657. if (nr_unshown) {
  658. pr_alert("BUG: Bad page map: %lu messages suppressed\n",
  659. nr_unshown);
  660. nr_unshown = 0;
  661. }
  662. nr_shown = 0;
  663. }
  664. if (nr_shown++ == 0)
  665. resume = jiffies + 60 * HZ;
  666. mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
  667. index = linear_page_index(vma, addr);
  668. pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
  669. current->comm,
  670. (long long)pte_val(pte), (long long)pmd_val(*pmd));
  671. if (page)
  672. dump_page(page, "bad pte");
  673. pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
  674. (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
  675. pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
  676. vma->vm_file,
  677. vma->vm_ops ? vma->vm_ops->fault : NULL,
  678. vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
  679. mapping ? mapping->a_ops->readpage : NULL);
  680. dump_stack();
  681. add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
  682. }
  683. /*
  684. * vm_normal_page -- This function gets the "struct page" associated with a pte.
  685. *
  686. * "Special" mappings do not wish to be associated with a "struct page" (either
  687. * it doesn't exist, or it exists but they don't want to touch it). In this
  688. * case, NULL is returned here. "Normal" mappings do have a struct page.
  689. *
  690. * There are 2 broad cases. Firstly, an architecture may define a pte_special()
  691. * pte bit, in which case this function is trivial. Secondly, an architecture
  692. * may not have a spare pte bit, which requires a more complicated scheme,
  693. * described below.
  694. *
  695. * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
  696. * special mapping (even if there are underlying and valid "struct pages").
  697. * COWed pages of a VM_PFNMAP are always normal.
  698. *
  699. * The way we recognize COWed pages within VM_PFNMAP mappings is through the
  700. * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
  701. * set, and the vm_pgoff will point to the first PFN mapped: thus every special
  702. * mapping will always honor the rule
  703. *
  704. * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
  705. *
  706. * And for normal mappings this is false.
  707. *
  708. * This restricts such mappings to be a linear translation from virtual address
  709. * to pfn. To get around this restriction, we allow arbitrary mappings so long
  710. * as the vma is not a COW mapping; in that case, we know that all ptes are
  711. * special (because none can have been COWed).
  712. *
  713. *
  714. * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
  715. *
  716. * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
  717. * page" backing, however the difference is that _all_ pages with a struct
  718. * page (that is, those where pfn_valid is true) are refcounted and considered
  719. * normal pages by the VM. The disadvantage is that pages are refcounted
  720. * (which can be slower and simply not an option for some PFNMAP users). The
  721. * advantage is that we don't have to follow the strict linearity rule of
  722. * PFNMAP mappings in order to support COWable mappings.
  723. *
  724. */
  725. struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
  726. pte_t pte, bool with_public_device)
  727. {
  728. unsigned long pfn = pte_pfn(pte);
  729. if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
  730. if (likely(!pte_special(pte)))
  731. goto check_pfn;
  732. if (vma->vm_ops && vma->vm_ops->find_special_page)
  733. return vma->vm_ops->find_special_page(vma, addr);
  734. if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
  735. return NULL;
  736. if (is_zero_pfn(pfn))
  737. return NULL;
  738. /*
  739. * Device public pages are special pages (they are ZONE_DEVICE
  740. * pages but different from persistent memory). They behave
  741. * almost like normal pages. The difference is that they are
  742. * not on the lru and thus should never be involved with any-
  743. * thing that involves lru manipulation (mlock, numa balancing,
  744. * ...).
  745. *
  746. * This is why we still want to return NULL for such pages from
  747. * vm_normal_page() so that we do not have to special case all
  748. * call sites of vm_normal_page().
  749. */
  750. if (likely(pfn <= highest_memmap_pfn)) {
  751. struct page *page = pfn_to_page(pfn);
  752. if (is_device_public_page(page)) {
  753. if (with_public_device)
  754. return page;
  755. return NULL;
  756. }
  757. }
  758. if (pte_devmap(pte))
  759. return NULL;
  760. print_bad_pte(vma, addr, pte, NULL);
  761. return NULL;
  762. }
  763. /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
  764. if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
  765. if (vma->vm_flags & VM_MIXEDMAP) {
  766. if (!pfn_valid(pfn))
  767. return NULL;
  768. goto out;
  769. } else {
  770. unsigned long off;
  771. off = (addr - vma->vm_start) >> PAGE_SHIFT;
  772. if (pfn == vma->vm_pgoff + off)
  773. return NULL;
  774. if (!is_cow_mapping(vma->vm_flags))
  775. return NULL;
  776. }
  777. }
  778. if (is_zero_pfn(pfn))
  779. return NULL;
  780. check_pfn:
  781. if (unlikely(pfn > highest_memmap_pfn)) {
  782. print_bad_pte(vma, addr, pte, NULL);
  783. return NULL;
  784. }
  785. /*
  786. * NOTE! We still have PageReserved() pages in the page tables.
  787. * eg. VDSO mappings can cause them to exist.
  788. */
  789. out:
  790. return pfn_to_page(pfn);
  791. }
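/*
 * Illustrative sketch (not part of this file): how a pte walker typically
 * consumes vm_normal_page(), only touching the struct page when the pte is
 * backed by a "normal", refcounted page as described in the comment above;
 * the example_ helper name is made up.
 */
static void example_touch_if_normal(struct vm_area_struct *vma,
				    unsigned long addr, pte_t pte)
{
	struct page *page = vm_normal_page(vma, addr, pte);

	if (!page)	/* special, zero, or raw PFN mapping: nothing to do */
		return;
	mark_page_accessed(page);
}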
  792. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  793. struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
  794. pmd_t pmd)
  795. {
  796. unsigned long pfn = pmd_pfn(pmd);
  797. /*
  798. * There is no pmd_special() but there may be special pmds, e.g.
  799. * in a direct-access (dax) mapping, so let's just replicate the
  800. * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
  801. */
  802. if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
  803. if (vma->vm_flags & VM_MIXEDMAP) {
  804. if (!pfn_valid(pfn))
  805. return NULL;
  806. goto out;
  807. } else {
  808. unsigned long off;
  809. off = (addr - vma->vm_start) >> PAGE_SHIFT;
  810. if (pfn == vma->vm_pgoff + off)
  811. return NULL;
  812. if (!is_cow_mapping(vma->vm_flags))
  813. return NULL;
  814. }
  815. }
  816. if (pmd_devmap(pmd))
  817. return NULL;
  818. if (is_zero_pfn(pfn))
  819. return NULL;
  820. if (unlikely(pfn > highest_memmap_pfn))
  821. return NULL;
  822. /*
  823. * NOTE! We still have PageReserved() pages in the page tables.
  824. * eg. VDSO mappings can cause them to exist.
  825. */
  826. out:
  827. return pfn_to_page(pfn);
  828. }
  829. #endif
  830. /*
  831. * Copy one vm_area from one task to the other. Assumes that the page
  832. * tables already present in the new task are cleared in the whole range
  833. * covered by this vma.
  834. */
  835. static inline unsigned long
  836. copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  837. pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
  838. unsigned long addr, int *rss)
  839. {
  840. unsigned long vm_flags = vma->vm_flags;
  841. pte_t pte = *src_pte;
  842. struct page *page;
  843. /* pte contains position in swap or file, so copy. */
  844. if (unlikely(!pte_present(pte))) {
  845. swp_entry_t entry = pte_to_swp_entry(pte);
  846. if (likely(!non_swap_entry(entry))) {
  847. if (swap_duplicate(entry) < 0)
  848. return entry.val;
  849. /* make sure dst_mm is on swapoff's mmlist. */
  850. if (unlikely(list_empty(&dst_mm->mmlist))) {
  851. spin_lock(&mmlist_lock);
  852. if (list_empty(&dst_mm->mmlist))
  853. list_add(&dst_mm->mmlist,
  854. &src_mm->mmlist);
  855. spin_unlock(&mmlist_lock);
  856. }
  857. rss[MM_SWAPENTS]++;
  858. } else if (is_migration_entry(entry)) {
  859. page = migration_entry_to_page(entry);
  860. rss[mm_counter(page)]++;
  861. if (is_write_migration_entry(entry) &&
  862. is_cow_mapping(vm_flags)) {
  863. /*
  864. * COW mappings require pages in both
  865. * parent and child to be mapped read-only.
  866. */
  867. make_migration_entry_read(&entry);
  868. pte = swp_entry_to_pte(entry);
  869. if (pte_swp_soft_dirty(*src_pte))
  870. pte = pte_swp_mksoft_dirty(pte);
  871. set_pte_at(src_mm, addr, src_pte, pte);
  872. }
  873. } else if (is_device_private_entry(entry)) {
  874. page = device_private_entry_to_page(entry);
  875. /*
  876. * Update rss count even for unaddressable pages, as
  877. * they should be treated just like normal pages in this
  878. * respect.
  879. *
  880. * We will likely want to have some new rss counters
  881. * for unaddressable pages, at some point. But for now
  882. * keep things as they are.
  883. */
  884. get_page(page);
  885. rss[mm_counter(page)]++;
  886. page_dup_rmap(page, false);
  887. /*
  888. * We do not preserve soft-dirty information, because so
  889. * far, checkpoint/restore is the only feature that
  890. * requires that. And checkpoint/restore does not work
  891. * when a device driver is involved (you cannot easily
  892. * save and restore device driver state).
  893. */
  894. if (is_write_device_private_entry(entry) &&
  895. is_cow_mapping(vm_flags)) {
  896. make_device_private_entry_read(&entry);
  897. pte = swp_entry_to_pte(entry);
  898. set_pte_at(src_mm, addr, src_pte, pte);
  899. }
  900. }
  901. goto out_set_pte;
  902. }
  903. /*
  904. * If it's a COW mapping, write protect it both
  905. * in the parent and the child
  906. */
  907. if (is_cow_mapping(vm_flags)) {
  908. ptep_set_wrprotect(src_mm, addr, src_pte);
  909. pte = pte_wrprotect(pte);
  910. }
  911. /*
  912. * If it's a shared mapping, mark it clean in
  913. * the child
  914. */
  915. if (vm_flags & VM_SHARED)
  916. pte = pte_mkclean(pte);
  917. pte = pte_mkold(pte);
  918. page = vm_normal_page(vma, addr, pte);
  919. if (page) {
  920. get_page(page);
  921. page_dup_rmap(page, false);
  922. rss[mm_counter(page)]++;
  923. } else if (pte_devmap(pte)) {
  924. page = pte_page(pte);
  925. /*
  926. * Cache coherent device memory behaves like regular pages and
  927. * not like persistent memory pages. For more information see
  928. * MEMORY_DEVICE_CACHE_COHERENT in memory_hotplug.h
  929. */
  930. if (is_device_public_page(page)) {
  931. get_page(page);
  932. page_dup_rmap(page, false);
  933. rss[mm_counter(page)]++;
  934. }
  935. }
  936. out_set_pte:
  937. set_pte_at(dst_mm, addr, dst_pte, pte);
  938. return 0;
  939. }
  940. static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  941. pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
  942. unsigned long addr, unsigned long end)
  943. {
  944. pte_t *orig_src_pte, *orig_dst_pte;
  945. pte_t *src_pte, *dst_pte;
  946. spinlock_t *src_ptl, *dst_ptl;
  947. int progress = 0;
  948. int rss[NR_MM_COUNTERS];
  949. swp_entry_t entry = (swp_entry_t){0};
  950. again:
  951. init_rss_vec(rss);
  952. dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
  953. if (!dst_pte)
  954. return -ENOMEM;
  955. src_pte = pte_offset_map(src_pmd, addr);
  956. src_ptl = pte_lockptr(src_mm, src_pmd);
  957. spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
  958. orig_src_pte = src_pte;
  959. orig_dst_pte = dst_pte;
  960. arch_enter_lazy_mmu_mode();
  961. do {
  962. /*
  963. * We are holding two locks at this point - either of them
  964. * could generate latencies in another task on another CPU.
  965. */
  966. if (progress >= 32) {
  967. progress = 0;
  968. if (need_resched() ||
  969. spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
  970. break;
  971. }
  972. if (pte_none(*src_pte)) {
  973. progress++;
  974. continue;
  975. }
  976. entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
  977. vma, addr, rss);
  978. if (entry.val)
  979. break;
  980. progress += 8;
  981. } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
  982. arch_leave_lazy_mmu_mode();
  983. spin_unlock(src_ptl);
  984. pte_unmap(orig_src_pte);
  985. add_mm_rss_vec(dst_mm, rss);
  986. pte_unmap_unlock(orig_dst_pte, dst_ptl);
  987. cond_resched();
  988. if (entry.val) {
  989. if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
  990. return -ENOMEM;
  991. progress = 0;
  992. }
  993. if (addr != end)
  994. goto again;
  995. return 0;
  996. }
  997. static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  998. pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
  999. unsigned long addr, unsigned long end)
  1000. {
  1001. pmd_t *src_pmd, *dst_pmd;
  1002. unsigned long next;
  1003. dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
  1004. if (!dst_pmd)
  1005. return -ENOMEM;
  1006. src_pmd = pmd_offset(src_pud, addr);
  1007. do {
  1008. next = pmd_addr_end(addr, end);
  1009. if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
  1010. || pmd_devmap(*src_pmd)) {
  1011. int err;
  1012. VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
  1013. err = copy_huge_pmd(dst_mm, src_mm,
  1014. dst_pmd, src_pmd, addr, vma);
  1015. if (err == -ENOMEM)
  1016. return -ENOMEM;
  1017. if (!err)
  1018. continue;
  1019. /* fall through */
  1020. }
  1021. if (pmd_none_or_clear_bad(src_pmd))
  1022. continue;
  1023. if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
  1024. vma, addr, next))
  1025. return -ENOMEM;
  1026. } while (dst_pmd++, src_pmd++, addr = next, addr != end);
  1027. return 0;
  1028. }
  1029. static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  1030. p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
  1031. unsigned long addr, unsigned long end)
  1032. {
  1033. pud_t *src_pud, *dst_pud;
  1034. unsigned long next;
  1035. dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
  1036. if (!dst_pud)
  1037. return -ENOMEM;
  1038. src_pud = pud_offset(src_p4d, addr);
  1039. do {
  1040. next = pud_addr_end(addr, end);
  1041. if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
  1042. int err;
  1043. VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
  1044. err = copy_huge_pud(dst_mm, src_mm,
  1045. dst_pud, src_pud, addr, vma);
  1046. if (err == -ENOMEM)
  1047. return -ENOMEM;
  1048. if (!err)
  1049. continue;
  1050. /* fall through */
  1051. }
  1052. if (pud_none_or_clear_bad(src_pud))
  1053. continue;
  1054. if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
  1055. vma, addr, next))
  1056. return -ENOMEM;
  1057. } while (dst_pud++, src_pud++, addr = next, addr != end);
  1058. return 0;
  1059. }
  1060. static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  1061. pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
  1062. unsigned long addr, unsigned long end)
  1063. {
  1064. p4d_t *src_p4d, *dst_p4d;
  1065. unsigned long next;
  1066. dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
  1067. if (!dst_p4d)
  1068. return -ENOMEM;
  1069. src_p4d = p4d_offset(src_pgd, addr);
  1070. do {
  1071. next = p4d_addr_end(addr, end);
  1072. if (p4d_none_or_clear_bad(src_p4d))
  1073. continue;
  1074. if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
  1075. vma, addr, next))
  1076. return -ENOMEM;
  1077. } while (dst_p4d++, src_p4d++, addr = next, addr != end);
  1078. return 0;
  1079. }
  1080. int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  1081. struct vm_area_struct *vma)
  1082. {
  1083. pgd_t *src_pgd, *dst_pgd;
  1084. unsigned long next;
  1085. unsigned long addr = vma->vm_start;
  1086. unsigned long end = vma->vm_end;
  1087. unsigned long mmun_start; /* For mmu_notifiers */
  1088. unsigned long mmun_end; /* For mmu_notifiers */
  1089. bool is_cow;
  1090. int ret;
  1091. /*
  1092. * Don't copy ptes where a page fault will fill them correctly.
  1093. * Fork becomes much lighter when there are big shared or private
1094. * readonly mappings. The tradeoff is that copy_page_range is more
1095. * efficient than faulting.
  1096. */
  1097. if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
  1098. !vma->anon_vma)
  1099. return 0;
  1100. if (is_vm_hugetlb_page(vma))
  1101. return copy_hugetlb_page_range(dst_mm, src_mm, vma);
  1102. if (unlikely(vma->vm_flags & VM_PFNMAP)) {
  1103. /*
  1104. * We do not free on error cases below as remove_vma
1105. * gets called on error from higher level routine
  1106. */
  1107. ret = track_pfn_copy(vma);
  1108. if (ret)
  1109. return ret;
  1110. }
  1111. /*
  1112. * We need to invalidate the secondary MMU mappings only when
  1113. * there could be a permission downgrade on the ptes of the
  1114. * parent mm. And a permission downgrade will only happen if
  1115. * is_cow_mapping() returns true.
  1116. */
  1117. is_cow = is_cow_mapping(vma->vm_flags);
  1118. mmun_start = addr;
  1119. mmun_end = end;
  1120. if (is_cow)
  1121. mmu_notifier_invalidate_range_start(src_mm, mmun_start,
  1122. mmun_end);
  1123. ret = 0;
  1124. dst_pgd = pgd_offset(dst_mm, addr);
  1125. src_pgd = pgd_offset(src_mm, addr);
  1126. do {
  1127. next = pgd_addr_end(addr, end);
  1128. if (pgd_none_or_clear_bad(src_pgd))
  1129. continue;
  1130. if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
  1131. vma, addr, next))) {
  1132. ret = -ENOMEM;
  1133. break;
  1134. }
  1135. } while (dst_pgd++, src_pgd++, addr = next, addr != end);
  1136. if (is_cow)
  1137. mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);
  1138. return ret;
  1139. }
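/*
 * Usage sketch, not part of this file: copy_page_range() is called from
 * fork()'s dup_mmap() in kernel/fork.c, which walks the parent's vmas and,
 * for each vma not marked VM_WIPEONFORK, does roughly:
 *
 *	retval = copy_page_range(mm, oldmm, mpnt);
 *
 * where "mm" is the child mm, "oldmm" the parent mm and "mpnt" the parent vma.
 */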
  1140. static unsigned long zap_pte_range(struct mmu_gather *tlb,
  1141. struct vm_area_struct *vma, pmd_t *pmd,
  1142. unsigned long addr, unsigned long end,
  1143. struct zap_details *details)
  1144. {
  1145. struct mm_struct *mm = tlb->mm;
  1146. int force_flush = 0;
  1147. int rss[NR_MM_COUNTERS];
  1148. spinlock_t *ptl;
  1149. pte_t *start_pte;
  1150. pte_t *pte;
  1151. swp_entry_t entry;
  1152. tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
  1153. again:
  1154. init_rss_vec(rss);
  1155. start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
  1156. pte = start_pte;
  1157. flush_tlb_batched_pending(mm);
  1158. arch_enter_lazy_mmu_mode();
  1159. do {
  1160. pte_t ptent = *pte;
  1161. if (pte_none(ptent))
  1162. continue;
  1163. if (pte_present(ptent)) {
  1164. struct page *page;
  1165. page = _vm_normal_page(vma, addr, ptent, true);
  1166. if (unlikely(details) && page) {
  1167. /*
  1168. * unmap_shared_mapping_pages() wants to
  1169. * invalidate cache without truncating:
  1170. * unmap shared but keep private pages.
  1171. */
  1172. if (details->check_mapping &&
  1173. details->check_mapping != page_rmapping(page))
  1174. continue;
  1175. }
  1176. ptent = ptep_get_and_clear_full(mm, addr, pte,
  1177. tlb->fullmm);
  1178. tlb_remove_tlb_entry(tlb, pte, addr);
  1179. if (unlikely(!page))
  1180. continue;
  1181. if (!PageAnon(page)) {
  1182. if (pte_dirty(ptent)) {
  1183. force_flush = 1;
  1184. set_page_dirty(page);
  1185. }
  1186. if (pte_young(ptent) &&
  1187. likely(!(vma->vm_flags & VM_SEQ_READ)))
  1188. mark_page_accessed(page);
  1189. }
  1190. rss[mm_counter(page)]--;
  1191. page_remove_rmap(page, false);
  1192. if (unlikely(page_mapcount(page) < 0))
  1193. print_bad_pte(vma, addr, ptent, page);
  1194. if (unlikely(__tlb_remove_page(tlb, page))) {
  1195. force_flush = 1;
  1196. addr += PAGE_SIZE;
  1197. break;
  1198. }
  1199. continue;
  1200. }
  1201. entry = pte_to_swp_entry(ptent);
  1202. if (non_swap_entry(entry) && is_device_private_entry(entry)) {
  1203. struct page *page = device_private_entry_to_page(entry);
  1204. if (unlikely(details && details->check_mapping)) {
  1205. /*
  1206. * unmap_shared_mapping_pages() wants to
  1207. * invalidate cache without truncating:
  1208. * unmap shared but keep private pages.
  1209. */
  1210. if (details->check_mapping !=
  1211. page_rmapping(page))
  1212. continue;
  1213. }
  1214. pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
  1215. rss[mm_counter(page)]--;
  1216. page_remove_rmap(page, false);
  1217. put_page(page);
  1218. continue;
  1219. }
  1220. /* If details->check_mapping, we leave swap entries. */
  1221. if (unlikely(details))
  1222. continue;
  1223. entry = pte_to_swp_entry(ptent);
  1224. if (!non_swap_entry(entry))
  1225. rss[MM_SWAPENTS]--;
  1226. else if (is_migration_entry(entry)) {
  1227. struct page *page;
  1228. page = migration_entry_to_page(entry);
  1229. rss[mm_counter(page)]--;
  1230. }
  1231. if (unlikely(!free_swap_and_cache(entry)))
  1232. print_bad_pte(vma, addr, ptent, NULL);
  1233. pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
  1234. } while (pte++, addr += PAGE_SIZE, addr != end);
  1235. add_mm_rss_vec(mm, rss);
  1236. arch_leave_lazy_mmu_mode();
  1237. /* Do the actual TLB flush before dropping ptl */
  1238. if (force_flush)
  1239. tlb_flush_mmu_tlbonly(tlb);
  1240. pte_unmap_unlock(start_pte, ptl);
  1241. /*
  1242. * If we forced a TLB flush (either due to running out of
  1243. * batch buffers or because we needed to flush dirty TLB
  1244. * entries before releasing the ptl), free the batched
  1245. * memory too. Restart if we didn't do everything.
  1246. */
  1247. if (force_flush) {
  1248. force_flush = 0;
  1249. tlb_flush_mmu_free(tlb);
  1250. if (addr != end)
  1251. goto again;
  1252. }
  1253. return addr;
  1254. }
  1255. static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
  1256. struct vm_area_struct *vma, pud_t *pud,
  1257. unsigned long addr, unsigned long end,
  1258. struct zap_details *details)
  1259. {
  1260. pmd_t *pmd;
  1261. unsigned long next;
  1262. pmd = pmd_offset(pud, addr);
  1263. do {
  1264. next = pmd_addr_end(addr, end);
  1265. if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
  1266. if (next - addr != HPAGE_PMD_SIZE)
  1267. __split_huge_pmd(vma, pmd, addr, false, NULL);
  1268. else if (zap_huge_pmd(tlb, vma, pmd, addr))
  1269. goto next;
  1270. /* fall through */
  1271. }
  1272. /*
  1273. * Here there can be other concurrent MADV_DONTNEED or
  1274. * trans huge page faults running, and if the pmd is
  1275. * none or trans huge it can change under us. This is
  1276. * because MADV_DONTNEED holds the mmap_sem in read
  1277. * mode.
  1278. */
  1279. if (pmd_none_or_trans_huge_or_clear_bad(pmd))
  1280. goto next;
  1281. next = zap_pte_range(tlb, vma, pmd, addr, next, details);
  1282. next:
  1283. cond_resched();
  1284. } while (pmd++, addr = next, addr != end);
  1285. return addr;
  1286. }
  1287. static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
  1288. struct vm_area_struct *vma, p4d_t *p4d,
  1289. unsigned long addr, unsigned long end,
  1290. struct zap_details *details)
  1291. {
  1292. pud_t *pud;
  1293. unsigned long next;
  1294. pud = pud_offset(p4d, addr);
  1295. do {
  1296. next = pud_addr_end(addr, end);
  1297. if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
  1298. if (next - addr != HPAGE_PUD_SIZE) {
  1299. VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
  1300. split_huge_pud(vma, pud, addr);
  1301. } else if (zap_huge_pud(tlb, vma, pud, addr))
  1302. goto next;
  1303. /* fall through */
  1304. }
  1305. if (pud_none_or_clear_bad(pud))
  1306. continue;
  1307. next = zap_pmd_range(tlb, vma, pud, addr, next, details);
  1308. next:
  1309. cond_resched();
  1310. } while (pud++, addr = next, addr != end);
  1311. return addr;
  1312. }
  1313. static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
  1314. struct vm_area_struct *vma, pgd_t *pgd,
  1315. unsigned long addr, unsigned long end,
  1316. struct zap_details *details)
  1317. {
  1318. p4d_t *p4d;
  1319. unsigned long next;
  1320. p4d = p4d_offset(pgd, addr);
  1321. do {
  1322. next = p4d_addr_end(addr, end);
  1323. if (p4d_none_or_clear_bad(p4d))
  1324. continue;
  1325. next = zap_pud_range(tlb, vma, p4d, addr, next, details);
  1326. } while (p4d++, addr = next, addr != end);
  1327. return addr;
  1328. }
  1329. void unmap_page_range(struct mmu_gather *tlb,
  1330. struct vm_area_struct *vma,
  1331. unsigned long addr, unsigned long end,
  1332. struct zap_details *details)
  1333. {
  1334. pgd_t *pgd;
  1335. unsigned long next;
  1336. BUG_ON(addr >= end);
  1337. tlb_start_vma(tlb, vma);
  1338. pgd = pgd_offset(vma->vm_mm, addr);
  1339. do {
  1340. next = pgd_addr_end(addr, end);
  1341. if (pgd_none_or_clear_bad(pgd))
  1342. continue;
  1343. next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
  1344. } while (pgd++, addr = next, addr != end);
  1345. tlb_end_vma(tlb, vma);
  1346. }
  1347. static void unmap_single_vma(struct mmu_gather *tlb,
  1348. struct vm_area_struct *vma, unsigned long start_addr,
  1349. unsigned long end_addr,
  1350. struct zap_details *details)
  1351. {
  1352. unsigned long start = max(vma->vm_start, start_addr);
  1353. unsigned long end;
  1354. if (start >= vma->vm_end)
  1355. return;
  1356. end = min(vma->vm_end, end_addr);
  1357. if (end <= vma->vm_start)
  1358. return;
  1359. if (vma->vm_file)
  1360. uprobe_munmap(vma, start, end);
  1361. if (unlikely(vma->vm_flags & VM_PFNMAP))
  1362. untrack_pfn(vma, 0, 0);
  1363. if (start != end) {
  1364. if (unlikely(is_vm_hugetlb_page(vma))) {
  1365. /*
  1366. * It is undesirable to test vma->vm_file as it
  1367. * should be non-null for valid hugetlb area.
  1368. * However, vm_file will be NULL in the error
  1369. * cleanup path of mmap_region. When
  1370. * hugetlbfs ->mmap method fails,
  1371. * mmap_region() nullifies vma->vm_file
  1372. * before calling this function to clean up.
  1373. * Since no pte has actually been setup, it is
  1374. * safe to do nothing in this case.
  1375. */
  1376. if (vma->vm_file) {
  1377. i_mmap_lock_write(vma->vm_file->f_mapping);
  1378. __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
  1379. i_mmap_unlock_write(vma->vm_file->f_mapping);
  1380. }
  1381. } else
  1382. unmap_page_range(tlb, vma, start, end, details);
  1383. }
  1384. }
  1385. /**
  1386. * unmap_vmas - unmap a range of memory covered by a list of vma's
  1387. * @tlb: address of the caller's struct mmu_gather
  1388. * @vma: the starting vma
  1389. * @start_addr: virtual address at which to start unmapping
  1390. * @end_addr: virtual address at which to end unmapping
  1391. *
  1392. * Unmap all pages in the vma list.
  1393. *
1394. * Only addresses between @start_addr and @end_addr will be unmapped.
  1395. *
  1396. * The VMA list must be sorted in ascending virtual address order.
  1397. *
  1398. * unmap_vmas() assumes that the caller will flush the whole unmapped address
  1399. * range after unmap_vmas() returns. So the only responsibility here is to
  1400. * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  1401. * drops the lock and schedules.
  1402. */
  1403. void unmap_vmas(struct mmu_gather *tlb,
  1404. struct vm_area_struct *vma, unsigned long start_addr,
  1405. unsigned long end_addr)
  1406. {
  1407. struct mm_struct *mm = vma->vm_mm;
  1408. mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
  1409. for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
  1410. unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
  1411. mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
  1412. }
  1413. /**
  1414. * zap_page_range - remove user pages in a given range
  1415. * @vma: vm_area_struct holding the applicable pages
  1416. * @start: starting address of pages to zap
  1417. * @size: number of bytes to zap
  1418. *
  1419. * Caller must protect the VMA list
  1420. */
  1421. void zap_page_range(struct vm_area_struct *vma, unsigned long start,
  1422. unsigned long size)
  1423. {
  1424. struct mm_struct *mm = vma->vm_mm;
  1425. struct mmu_gather tlb;
  1426. unsigned long end = start + size;
  1427. lru_add_drain();
  1428. tlb_gather_mmu(&tlb, mm, start, end);
  1429. update_hiwater_rss(mm);
  1430. mmu_notifier_invalidate_range_start(mm, start, end);
  1431. for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
  1432. unmap_single_vma(&tlb, vma, start, end, NULL);
  1433. mmu_notifier_invalidate_range_end(mm, start, end);
  1434. tlb_finish_mmu(&tlb, start, end);
  1435. }
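/*
 * Usage sketch, not part of this file: a caller that already holds the
 * mmap_sem (madvise(MADV_DONTNEED) is the classic example) can drop every
 * page backing a vma like this; "discard_vma_pages" is a hypothetical name.
 */
static void discard_vma_pages(struct vm_area_struct *vma)
{
	/* Tear down all ptes in the vma; later faults repopulate lazily. */
	zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}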
  1436. /**
  1437. * zap_page_range_single - remove user pages in a given range
  1438. * @vma: vm_area_struct holding the applicable pages
  1439. * @address: starting address of pages to zap
  1440. * @size: number of bytes to zap
  1441. * @details: details of shared cache invalidation
  1442. *
  1443. * The range must fit into one VMA.
  1444. */
  1445. static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
  1446. unsigned long size, struct zap_details *details)
  1447. {
  1448. struct mm_struct *mm = vma->vm_mm;
  1449. struct mmu_gather tlb;
  1450. unsigned long end = address + size;
  1451. lru_add_drain();
  1452. tlb_gather_mmu(&tlb, mm, address, end);
  1453. update_hiwater_rss(mm);
  1454. mmu_notifier_invalidate_range_start(mm, address, end);
  1455. unmap_single_vma(&tlb, vma, address, end, details);
  1456. mmu_notifier_invalidate_range_end(mm, address, end);
  1457. tlb_finish_mmu(&tlb, address, end);
  1458. }
  1459. /**
  1460. * zap_vma_ptes - remove ptes mapping the vma
  1461. * @vma: vm_area_struct holding ptes to be zapped
  1462. * @address: starting address of pages to zap
  1463. * @size: number of bytes to zap
  1464. *
  1465. * This function only unmaps ptes assigned to VM_PFNMAP vmas.
  1466. *
  1467. * The entire address range must be fully contained within the vma.
  1468. *
  1469. */
  1470. void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
  1471. unsigned long size)
  1472. {
  1473. if (address < vma->vm_start || address + size > vma->vm_end ||
  1474. !(vma->vm_flags & VM_PFNMAP))
  1475. return;
  1476. zap_page_range_single(vma, address, size, NULL);
  1477. }
  1478. EXPORT_SYMBOL_GPL(zap_vma_ptes);
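/*
 * Usage sketch, not part of this file: a driver that populated a VM_PFNMAP
 * vma (for example with remap_pfn_range()) can revoke the user mappings
 * before the backing resource disappears; "my_revoke_mapping" is a
 * hypothetical name.
 */
static void my_revoke_mapping(struct vm_area_struct *vma)
{
	/* Remove every pte in the vma; subsequent accesses fault again. */
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}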
  1479. pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
  1480. spinlock_t **ptl)
  1481. {
  1482. pgd_t *pgd;
  1483. p4d_t *p4d;
  1484. pud_t *pud;
  1485. pmd_t *pmd;
  1486. pgd = pgd_offset(mm, addr);
  1487. p4d = p4d_alloc(mm, pgd, addr);
  1488. if (!p4d)
  1489. return NULL;
  1490. pud = pud_alloc(mm, p4d, addr);
  1491. if (!pud)
  1492. return NULL;
  1493. pmd = pmd_alloc(mm, pud, addr);
  1494. if (!pmd)
  1495. return NULL;
  1496. VM_BUG_ON(pmd_trans_huge(*pmd));
  1497. return pte_alloc_map_lock(mm, pmd, addr, ptl);
  1498. }
  1499. /*
  1500. * This is the old fallback for page remapping.
  1501. *
  1502. * For historical reasons, it only allows reserved pages. Only
  1503. * old drivers should use this, and they needed to mark their
  1504. * pages reserved for the old functions anyway.
  1505. */
  1506. static int insert_page(struct vm_area_struct *vma, unsigned long addr,
  1507. struct page *page, pgprot_t prot)
  1508. {
  1509. struct mm_struct *mm = vma->vm_mm;
  1510. int retval;
  1511. pte_t *pte;
  1512. spinlock_t *ptl;
  1513. retval = -EINVAL;
  1514. if (PageAnon(page))
  1515. goto out;
  1516. retval = -ENOMEM;
  1517. flush_dcache_page(page);
  1518. pte = get_locked_pte(mm, addr, &ptl);
  1519. if (!pte)
  1520. goto out;
  1521. retval = -EBUSY;
  1522. if (!pte_none(*pte))
  1523. goto out_unlock;
  1524. /* Ok, finally just insert the thing.. */
  1525. get_page(page);
  1526. inc_mm_counter_fast(mm, mm_counter_file(page));
  1527. page_add_file_rmap(page, false);
  1528. set_pte_at(mm, addr, pte, mk_pte(page, prot));
  1529. retval = 0;
  1530. pte_unmap_unlock(pte, ptl);
  1531. return retval;
  1532. out_unlock:
  1533. pte_unmap_unlock(pte, ptl);
  1534. out:
  1535. return retval;
  1536. }
  1537. /**
  1538. * vm_insert_page - insert single page into user vma
  1539. * @vma: user vma to map to
  1540. * @addr: target user address of this page
  1541. * @page: source kernel page
  1542. *
  1543. * This allows drivers to insert individual pages they've allocated
  1544. * into a user vma.
  1545. *
  1546. * The page has to be a nice clean _individual_ kernel allocation.
  1547. * If you allocate a compound page, you need to have marked it as
  1548. * such (__GFP_COMP), or manually just split the page up yourself
  1549. * (see split_page()).
  1550. *
  1551. * NOTE! Traditionally this was done with "remap_pfn_range()" which
  1552. * took an arbitrary page protection parameter. This doesn't allow
  1553. * that. Your vma protection will have to be set up correctly, which
  1554. * means that if you want a shared writable mapping, you'd better
  1555. * ask for a shared writable mapping!
  1556. *
  1557. * The page does not need to be reserved.
  1558. *
  1559. * Usually this function is called from f_op->mmap() handler
  1560. * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
  1561. * Caller must set VM_MIXEDMAP on vma if it wants to call this
  1562. * function from other places, for example from page-fault handler.
  1563. */
  1564. int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
  1565. struct page *page)
  1566. {
  1567. if (addr < vma->vm_start || addr >= vma->vm_end)
  1568. return -EFAULT;
  1569. if (!page_count(page))
  1570. return -EINVAL;
  1571. if (!(vma->vm_flags & VM_MIXEDMAP)) {
  1572. BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
  1573. BUG_ON(vma->vm_flags & VM_PFNMAP);
  1574. vma->vm_flags |= VM_MIXEDMAP;
  1575. }
  1576. return insert_page(vma, addr, page, vma->vm_page_prot);
  1577. }
  1578. EXPORT_SYMBOL(vm_insert_page);
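/*
 * Usage sketch, not part of this file: from an f_op->mmap() handler a driver
 * can hand out pages it allocated itself; "my_mmap", "my_pages" and
 * "MY_NPAGES" are hypothetical names.
 */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	int i, err;

	/* Under mmap_sem write-lock; vm_insert_page() may set VM_MIXEDMAP. */
	for (i = 0; i < MY_NPAGES && uaddr < vma->vm_end; i++) {
		err = vm_insert_page(vma, uaddr, my_pages[i]);
		if (err)
			return err;
		uaddr += PAGE_SIZE;
	}
	return 0;
}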
  1579. static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
  1580. pfn_t pfn, pgprot_t prot, bool mkwrite)
  1581. {
  1582. struct mm_struct *mm = vma->vm_mm;
  1583. int retval;
  1584. pte_t *pte, entry;
  1585. spinlock_t *ptl;
  1586. retval = -ENOMEM;
  1587. pte = get_locked_pte(mm, addr, &ptl);
  1588. if (!pte)
  1589. goto out;
  1590. retval = -EBUSY;
  1591. if (!pte_none(*pte)) {
  1592. if (mkwrite) {
  1593. /*
  1594. * For read faults on private mappings the PFN passed
  1595. * in may not match the PFN we have mapped if the
  1596. * mapped PFN is a writeable COW page. In the mkwrite
  1597. * case we are creating a writable PTE for a shared
  1598. * mapping and we expect the PFNs to match.
  1599. */
  1600. if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn)))
  1601. goto out_unlock;
  1602. entry = *pte;
  1603. goto out_mkwrite;
  1604. } else
  1605. goto out_unlock;
  1606. }
  1607. /* Ok, finally just insert the thing.. */
  1608. if (pfn_t_devmap(pfn))
  1609. entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
  1610. else
  1611. entry = pte_mkspecial(pfn_t_pte(pfn, prot));
  1612. out_mkwrite:
  1613. if (mkwrite) {
  1614. entry = pte_mkyoung(entry);
  1615. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  1616. }
  1617. set_pte_at(mm, addr, pte, entry);
  1618. update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
  1619. retval = 0;
  1620. out_unlock:
  1621. pte_unmap_unlock(pte, ptl);
  1622. out:
  1623. return retval;
  1624. }
  1625. /**
  1626. * vm_insert_pfn - insert single pfn into user vma
  1627. * @vma: user vma to map to
  1628. * @addr: target user address of this page
  1629. * @pfn: source kernel pfn
  1630. *
  1631. * Similar to vm_insert_page, this allows drivers to insert individual pages
  1632. * they've allocated into a user vma. Same comments apply.
  1633. *
1634. * This function should only be called from a vm_ops->fault handler, and
1635. * in that case the handler should return VM_FAULT_NOPAGE on success.
  1636. *
  1637. * vma cannot be a COW mapping.
  1638. *
  1639. * As this is called only for pages that do not currently exist, we
  1640. * do not need to flush old virtual caches or the TLB.
  1641. */
  1642. int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
  1643. unsigned long pfn)
  1644. {
  1645. return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
  1646. }
  1647. EXPORT_SYMBOL(vm_insert_pfn);
  1648. /**
  1649. * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
  1650. * @vma: user vma to map to
  1651. * @addr: target user address of this page
  1652. * @pfn: source kernel pfn
  1653. * @pgprot: pgprot flags for the inserted page
  1654. *
1655. * This is exactly like vm_insert_pfn, except that it allows drivers
1656. * to override pgprot on a per-page basis.
  1657. *
  1658. * This only makes sense for IO mappings, and it makes no sense for
  1659. * cow mappings. In general, using multiple vmas is preferable;
  1660. * vm_insert_pfn_prot should only be used if using multiple VMAs is
  1661. * impractical.
  1662. */
  1663. int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
  1664. unsigned long pfn, pgprot_t pgprot)
  1665. {
  1666. int ret;
  1667. /*
  1668. * Technically, architectures with pte_special can avoid all these
  1669. * restrictions (same for remap_pfn_range). However we would like
  1670. * consistency in testing and feature parity among all, so we should
  1671. * try to keep these invariants in place for everybody.
  1672. */
  1673. BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
  1674. BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
  1675. (VM_PFNMAP|VM_MIXEDMAP));
  1676. BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
  1677. BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
  1678. if (addr < vma->vm_start || addr >= vma->vm_end)
  1679. return -EFAULT;
  1680. if (!pfn_modify_allowed(pfn, pgprot))
  1681. return -EACCES;
  1682. track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
  1683. ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
  1684. false);
  1685. return ret;
  1686. }
  1687. EXPORT_SYMBOL(vm_insert_pfn_prot);
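/*
 * Usage sketch, not part of this file: a ->fault handler for a VM_PFNMAP vma
 * can insert one pfn per fault and override the protection for that page;
 * "my_phys_base" is a hypothetical base address of a device region.
 */
static vm_fault_t my_pfn_fault(struct vm_fault *vmf)
{
	unsigned long pfn = (my_phys_base >> PAGE_SHIFT) + vmf->pgoff;

	/* Map this page uncached; the rest of the vma keeps vm_page_prot. */
	if (vm_insert_pfn_prot(vmf->vma, vmf->address, pfn,
			       pgprot_noncached(vmf->vma->vm_page_prot)))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}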
  1688. static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
  1689. {
  1690. /* these checks mirror the abort conditions in vm_normal_page */
  1691. if (vma->vm_flags & VM_MIXEDMAP)
  1692. return true;
  1693. if (pfn_t_devmap(pfn))
  1694. return true;
  1695. if (pfn_t_special(pfn))
  1696. return true;
  1697. if (is_zero_pfn(pfn_t_to_pfn(pfn)))
  1698. return true;
  1699. return false;
  1700. }
  1701. static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
  1702. pfn_t pfn, bool mkwrite)
  1703. {
  1704. pgprot_t pgprot = vma->vm_page_prot;
  1705. BUG_ON(!vm_mixed_ok(vma, pfn));
  1706. if (addr < vma->vm_start || addr >= vma->vm_end)
  1707. return -EFAULT;
  1708. track_pfn_insert(vma, &pgprot, pfn);
  1709. if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
  1710. return -EACCES;
  1711. /*
  1712. * If we don't have pte special, then we have to use the pfn_valid()
  1713. * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
  1714. * refcount the page if pfn_valid is true (hence insert_page rather
  1715. * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
1716. * without pte special, it would then be refcounted as a normal page.
  1717. */
  1718. if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
  1719. !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
  1720. struct page *page;
  1721. /*
  1722. * At this point we are committed to insert_page()
  1723. * regardless of whether the caller specified flags that
  1724. * result in pfn_t_has_page() == false.
  1725. */
  1726. page = pfn_to_page(pfn_t_to_pfn(pfn));
  1727. return insert_page(vma, addr, page, pgprot);
  1728. }
  1729. return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
  1730. }
  1731. int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
  1732. pfn_t pfn)
  1733. {
  1734. return __vm_insert_mixed(vma, addr, pfn, false);
  1735. }
  1736. EXPORT_SYMBOL(vm_insert_mixed);
  1737. /*
  1738. * If the insertion of PTE failed because someone else already added a
  1739. * different entry in the mean time, we treat that as success as we assume
  1740. * the same entry was actually inserted.
  1741. */
  1742. vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
  1743. unsigned long addr, pfn_t pfn)
  1744. {
  1745. int err;
  1746. err = __vm_insert_mixed(vma, addr, pfn, true);
  1747. if (err == -ENOMEM)
  1748. return VM_FAULT_OOM;
  1749. if (err < 0 && err != -EBUSY)
  1750. return VM_FAULT_SIGBUS;
  1751. return VM_FAULT_NOPAGE;
  1752. }
  1753. EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
  1754. /*
1755. * Maps a range of physical memory into the requested pages. The old
1756. * mappings are removed. Any references to nonexistent pages result
1757. * in null mappings (currently treated as "copy-on-access").
  1758. */
  1759. static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
  1760. unsigned long addr, unsigned long end,
  1761. unsigned long pfn, pgprot_t prot)
  1762. {
  1763. pte_t *pte;
  1764. spinlock_t *ptl;
  1765. int err = 0;
  1766. pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
  1767. if (!pte)
  1768. return -ENOMEM;
  1769. arch_enter_lazy_mmu_mode();
  1770. do {
  1771. BUG_ON(!pte_none(*pte));
  1772. if (!pfn_modify_allowed(pfn, prot)) {
  1773. err = -EACCES;
  1774. break;
  1775. }
  1776. set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
  1777. pfn++;
  1778. } while (pte++, addr += PAGE_SIZE, addr != end);
  1779. arch_leave_lazy_mmu_mode();
  1780. pte_unmap_unlock(pte - 1, ptl);
  1781. return err;
  1782. }
  1783. static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
  1784. unsigned long addr, unsigned long end,
  1785. unsigned long pfn, pgprot_t prot)
  1786. {
  1787. pmd_t *pmd;
  1788. unsigned long next;
  1789. int err;
  1790. pfn -= addr >> PAGE_SHIFT;
  1791. pmd = pmd_alloc(mm, pud, addr);
  1792. if (!pmd)
  1793. return -ENOMEM;
  1794. VM_BUG_ON(pmd_trans_huge(*pmd));
  1795. do {
  1796. next = pmd_addr_end(addr, end);
  1797. err = remap_pte_range(mm, pmd, addr, next,
  1798. pfn + (addr >> PAGE_SHIFT), prot);
  1799. if (err)
  1800. return err;
  1801. } while (pmd++, addr = next, addr != end);
  1802. return 0;
  1803. }
  1804. static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
  1805. unsigned long addr, unsigned long end,
  1806. unsigned long pfn, pgprot_t prot)
  1807. {
  1808. pud_t *pud;
  1809. unsigned long next;
  1810. int err;
  1811. pfn -= addr >> PAGE_SHIFT;
  1812. pud = pud_alloc(mm, p4d, addr);
  1813. if (!pud)
  1814. return -ENOMEM;
  1815. do {
  1816. next = pud_addr_end(addr, end);
  1817. err = remap_pmd_range(mm, pud, addr, next,
  1818. pfn + (addr >> PAGE_SHIFT), prot);
  1819. if (err)
  1820. return err;
  1821. } while (pud++, addr = next, addr != end);
  1822. return 0;
  1823. }
  1824. static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
  1825. unsigned long addr, unsigned long end,
  1826. unsigned long pfn, pgprot_t prot)
  1827. {
  1828. p4d_t *p4d;
  1829. unsigned long next;
  1830. int err;
  1831. pfn -= addr >> PAGE_SHIFT;
  1832. p4d = p4d_alloc(mm, pgd, addr);
  1833. if (!p4d)
  1834. return -ENOMEM;
  1835. do {
  1836. next = p4d_addr_end(addr, end);
  1837. err = remap_pud_range(mm, p4d, addr, next,
  1838. pfn + (addr >> PAGE_SHIFT), prot);
  1839. if (err)
  1840. return err;
  1841. } while (p4d++, addr = next, addr != end);
  1842. return 0;
  1843. }
  1844. /**
  1845. * remap_pfn_range - remap kernel memory to userspace
  1846. * @vma: user vma to map to
  1847. * @addr: target user address to start at
  1848. * @pfn: physical address of kernel memory
  1849. * @size: size of map area
  1850. * @prot: page protection flags for this mapping
  1851. *
  1852. * Note: this is only safe if the mm semaphore is held when called.
  1853. */
  1854. int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
  1855. unsigned long pfn, unsigned long size, pgprot_t prot)
  1856. {
  1857. pgd_t *pgd;
  1858. unsigned long next;
  1859. unsigned long end = addr + PAGE_ALIGN(size);
  1860. struct mm_struct *mm = vma->vm_mm;
  1861. unsigned long remap_pfn = pfn;
  1862. int err;
  1863. /*
  1864. * Physically remapped pages are special. Tell the
  1865. * rest of the world about it:
  1866. * VM_IO tells people not to look at these pages
  1867. * (accesses can have side effects).
  1868. * VM_PFNMAP tells the core MM that the base pages are just
  1869. * raw PFN mappings, and do not have a "struct page" associated
  1870. * with them.
  1871. * VM_DONTEXPAND
  1872. * Disable vma merging and expanding with mremap().
  1873. * VM_DONTDUMP
  1874. * Omit vma from core dump, even when VM_IO turned off.
  1875. *
  1876. * There's a horrible special case to handle copy-on-write
  1877. * behaviour that some programs depend on. We mark the "original"
  1878. * un-COW'ed pages by matching them up with "vma->vm_pgoff".
  1879. * See vm_normal_page() for details.
  1880. */
  1881. if (is_cow_mapping(vma->vm_flags)) {
  1882. if (addr != vma->vm_start || end != vma->vm_end)
  1883. return -EINVAL;
  1884. vma->vm_pgoff = pfn;
  1885. }
  1886. err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
  1887. if (err)
  1888. return -EINVAL;
  1889. vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
  1890. BUG_ON(addr >= end);
  1891. pfn -= addr >> PAGE_SHIFT;
  1892. pgd = pgd_offset(mm, addr);
  1893. flush_cache_range(vma, addr, end);
  1894. do {
  1895. next = pgd_addr_end(addr, end);
  1896. err = remap_p4d_range(mm, pgd, addr, next,
  1897. pfn + (addr >> PAGE_SHIFT), prot);
  1898. if (err)
  1899. break;
  1900. } while (pgd++, addr = next, addr != end);
  1901. if (err)
  1902. untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
  1903. return err;
  1904. }
  1905. EXPORT_SYMBOL(remap_pfn_range);
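/*
 * Usage sketch, not part of this file: the common pattern is a driver
 * ->mmap() handler that maps a whole physical region up front;
 * "my_phys_base" and "my_region_size" are hypothetical names.
 */
static int my_mmio_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (vma->vm_pgoff || size > my_region_size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       my_phys_base >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}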
  1906. /**
  1907. * vm_iomap_memory - remap memory to userspace
  1908. * @vma: user vma to map to
  1909. * @start: start of area
  1910. * @len: size of area
  1911. *
  1912. * This is a simplified io_remap_pfn_range() for common driver use. The
  1913. * driver just needs to give us the physical memory range to be mapped,
  1914. * we'll figure out the rest from the vma information.
  1915. *
1916. * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
1917. * write-combining or similar behaviour.
  1918. */
  1919. int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
  1920. {
  1921. unsigned long vm_len, pfn, pages;
  1922. /* Check that the physical memory area passed in looks valid */
  1923. if (start + len < start)
  1924. return -EINVAL;
  1925. /*
  1926. * You *really* shouldn't map things that aren't page-aligned,
  1927. * but we've historically allowed it because IO memory might
  1928. * just have smaller alignment.
  1929. */
  1930. len += start & ~PAGE_MASK;
  1931. pfn = start >> PAGE_SHIFT;
  1932. pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
  1933. if (pfn + pages < pfn)
  1934. return -EINVAL;
  1935. /* We start the mapping 'vm_pgoff' pages into the area */
  1936. if (vma->vm_pgoff > pages)
  1937. return -EINVAL;
  1938. pfn += vma->vm_pgoff;
  1939. pages -= vma->vm_pgoff;
  1940. /* Can we fit all of the mapping? */
  1941. vm_len = vma->vm_end - vma->vm_start;
  1942. if (vm_len >> PAGE_SHIFT > pages)
  1943. return -EINVAL;
  1944. /* Ok, let it rip */
  1945. return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
  1946. }
  1947. EXPORT_SYMBOL(vm_iomap_memory);
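/*
 * Usage sketch, not part of this file: with vm_iomap_memory() the ->mmap()
 * handler above collapses to a single call, and the offset/length checks
 * against the vma are done internally; names are hypothetical as before.
 */
static int my_iomap_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vm_iomap_memory(vma, my_phys_base, my_region_size);
}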
  1948. static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
  1949. unsigned long addr, unsigned long end,
  1950. pte_fn_t fn, void *data)
  1951. {
  1952. pte_t *pte;
  1953. int err;
  1954. pgtable_t token;
  1955. spinlock_t *uninitialized_var(ptl);
  1956. pte = (mm == &init_mm) ?
  1957. pte_alloc_kernel(pmd, addr) :
  1958. pte_alloc_map_lock(mm, pmd, addr, &ptl);
  1959. if (!pte)
  1960. return -ENOMEM;
  1961. BUG_ON(pmd_huge(*pmd));
  1962. arch_enter_lazy_mmu_mode();
  1963. token = pmd_pgtable(*pmd);
  1964. do {
  1965. err = fn(pte++, token, addr, data);
  1966. if (err)
  1967. break;
  1968. } while (addr += PAGE_SIZE, addr != end);
  1969. arch_leave_lazy_mmu_mode();
  1970. if (mm != &init_mm)
  1971. pte_unmap_unlock(pte-1, ptl);
  1972. return err;
  1973. }
  1974. static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
  1975. unsigned long addr, unsigned long end,
  1976. pte_fn_t fn, void *data)
  1977. {
  1978. pmd_t *pmd;
  1979. unsigned long next;
  1980. int err;
  1981. BUG_ON(pud_huge(*pud));
  1982. pmd = pmd_alloc(mm, pud, addr);
  1983. if (!pmd)
  1984. return -ENOMEM;
  1985. do {
  1986. next = pmd_addr_end(addr, end);
  1987. err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
  1988. if (err)
  1989. break;
  1990. } while (pmd++, addr = next, addr != end);
  1991. return err;
  1992. }
  1993. static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
  1994. unsigned long addr, unsigned long end,
  1995. pte_fn_t fn, void *data)
  1996. {
  1997. pud_t *pud;
  1998. unsigned long next;
  1999. int err;
  2000. pud = pud_alloc(mm, p4d, addr);
  2001. if (!pud)
  2002. return -ENOMEM;
  2003. do {
  2004. next = pud_addr_end(addr, end);
  2005. err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
  2006. if (err)
  2007. break;
  2008. } while (pud++, addr = next, addr != end);
  2009. return err;
  2010. }
  2011. static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
  2012. unsigned long addr, unsigned long end,
  2013. pte_fn_t fn, void *data)
  2014. {
  2015. p4d_t *p4d;
  2016. unsigned long next;
  2017. int err;
  2018. p4d = p4d_alloc(mm, pgd, addr);
  2019. if (!p4d)
  2020. return -ENOMEM;
  2021. do {
  2022. next = p4d_addr_end(addr, end);
  2023. err = apply_to_pud_range(mm, p4d, addr, next, fn, data);
  2024. if (err)
  2025. break;
  2026. } while (p4d++, addr = next, addr != end);
  2027. return err;
  2028. }
  2029. /*
  2030. * Scan a region of virtual memory, filling in page tables as necessary
  2031. * and calling a provided function on each leaf page table.
  2032. */
  2033. int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
  2034. unsigned long size, pte_fn_t fn, void *data)
  2035. {
  2036. pgd_t *pgd;
  2037. unsigned long next;
  2038. unsigned long end = addr + size;
  2039. int err;
  2040. if (WARN_ON(addr >= end))
  2041. return -EINVAL;
  2042. pgd = pgd_offset(mm, addr);
  2043. do {
  2044. next = pgd_addr_end(addr, end);
  2045. err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
  2046. if (err)
  2047. break;
  2048. } while (pgd++, addr = next, addr != end);
  2049. return err;
  2050. }
  2051. EXPORT_SYMBOL_GPL(apply_to_page_range);
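/*
 * Usage sketch, not part of this file: the callback passed to
 * apply_to_page_range() matches pte_fn_t and runs once per pte slot. This
 * hypothetical example counts populated ptes in a kernel address range
 * (note the walk allocates any missing intermediate page tables).
 */
static int count_present_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			     void *data)
{
	unsigned long *count = data;

	if (!pte_none(*pte))
		(*count)++;
	return 0;	/* returning non-zero stops the walk with that error */
}

static unsigned long count_present_range(unsigned long start, unsigned long size)
{
	unsigned long count = 0;

	apply_to_page_range(&init_mm, start, size, count_present_pte, &count);
	return count;
}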
  2052. /*
  2053. * handle_pte_fault chooses page fault handler according to an entry which was
  2054. * read non-atomically. Before making any commitment, on those architectures
  2055. * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
  2056. * parts, do_swap_page must check under lock before unmapping the pte and
  2057. * proceeding (but do_wp_page is only called after already making such a check;
  2058. * and do_anonymous_page can safely check later on).
  2059. */
  2060. static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
  2061. pte_t *page_table, pte_t orig_pte)
  2062. {
  2063. int same = 1;
  2064. #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
  2065. if (sizeof(pte_t) > sizeof(unsigned long)) {
  2066. spinlock_t *ptl = pte_lockptr(mm, pmd);
  2067. spin_lock(ptl);
  2068. same = pte_same(*page_table, orig_pte);
  2069. spin_unlock(ptl);
  2070. }
  2071. #endif
  2072. pte_unmap(page_table);
  2073. return same;
  2074. }
  2075. static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
  2076. {
  2077. debug_dma_assert_idle(src);
  2078. /*
  2079. * If the source page was a PFN mapping, we don't have
  2080. * a "struct page" for it. We do a best-effort copy by
  2081. * just copying from the original user address. If that
  2082. * fails, we just zero-fill it. Live with it.
  2083. */
  2084. if (unlikely(!src)) {
  2085. void *kaddr = kmap_atomic(dst);
  2086. void __user *uaddr = (void __user *)(va & PAGE_MASK);
  2087. /*
  2088. * This really shouldn't fail, because the page is there
  2089. * in the page tables. But it might just be unreadable,
  2090. * in which case we just give up and fill the result with
  2091. * zeroes.
  2092. */
  2093. if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
  2094. clear_page(kaddr);
  2095. kunmap_atomic(kaddr);
  2096. flush_dcache_page(dst);
  2097. } else
  2098. copy_user_highpage(dst, src, va, vma);
  2099. }
  2100. static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
  2101. {
  2102. struct file *vm_file = vma->vm_file;
  2103. if (vm_file)
  2104. return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
  2105. /*
  2106. * Special mappings (e.g. VDSO) do not have any file so fake
  2107. * a default GFP_KERNEL for them.
  2108. */
  2109. return GFP_KERNEL;
  2110. }
  2111. /*
  2112. * Notify the address space that the page is about to become writable so that
  2113. * it can prohibit this or wait for the page to get into an appropriate state.
  2114. *
  2115. * We do this without the lock held, so that it can sleep if it needs to.
  2116. */
  2117. static int do_page_mkwrite(struct vm_fault *vmf)
  2118. {
  2119. int ret;
  2120. struct page *page = vmf->page;
  2121. unsigned int old_flags = vmf->flags;
  2122. vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
  2123. ret = vmf->vma->vm_ops->page_mkwrite(vmf);
  2124. /* Restore original flags so that caller is not surprised */
  2125. vmf->flags = old_flags;
  2126. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
  2127. return ret;
  2128. if (unlikely(!(ret & VM_FAULT_LOCKED))) {
  2129. lock_page(page);
  2130. if (!page->mapping) {
  2131. unlock_page(page);
  2132. return 0; /* retry */
  2133. }
  2134. ret |= VM_FAULT_LOCKED;
  2135. } else
  2136. VM_BUG_ON_PAGE(!PageLocked(page), page);
  2137. return ret;
  2138. }
  2139. /*
  2140. * Handle dirtying of a page in shared file mapping on a write fault.
  2141. *
  2142. * The function expects the page to be locked and unlocks it.
  2143. */
  2144. static void fault_dirty_shared_page(struct vm_area_struct *vma,
  2145. struct page *page)
  2146. {
  2147. struct address_space *mapping;
  2148. bool dirtied;
  2149. bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
  2150. dirtied = set_page_dirty(page);
  2151. VM_BUG_ON_PAGE(PageAnon(page), page);
  2152. /*
  2153. * Take a local copy of the address_space - page.mapping may be zeroed
  2154. * by truncate after unlock_page(). The address_space itself remains
  2155. * pinned by vma->vm_file's reference. We rely on unlock_page()'s
  2156. * release semantics to prevent the compiler from undoing this copying.
  2157. */
  2158. mapping = page_rmapping(page);
  2159. unlock_page(page);
  2160. if ((dirtied || page_mkwrite) && mapping) {
  2161. /*
  2162. * Some device drivers do not set page.mapping
  2163. * but still dirty their pages
  2164. */
  2165. balance_dirty_pages_ratelimited(mapping);
  2166. }
  2167. if (!page_mkwrite)
  2168. file_update_time(vma->vm_file);
  2169. }
  2170. /*
  2171. * Handle write page faults for pages that can be reused in the current vma
  2172. *
  2173. * This can happen either due to the mapping being with the VM_SHARED flag,
  2174. * or due to us being the last reference standing to the page. In either
  2175. * case, all we need to do here is to mark the page as writable and update
  2176. * any related book-keeping.
  2177. */
  2178. static inline void wp_page_reuse(struct vm_fault *vmf)
  2179. __releases(vmf->ptl)
  2180. {
  2181. struct vm_area_struct *vma = vmf->vma;
  2182. struct page *page = vmf->page;
  2183. pte_t entry;
  2184. /*
  2185. * Clear the pages cpupid information as the existing
  2186. * information potentially belongs to a now completely
  2187. * unrelated process.
  2188. */
  2189. if (page)
  2190. page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
  2191. flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
  2192. entry = pte_mkyoung(vmf->orig_pte);
  2193. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  2194. if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
  2195. update_mmu_cache(vma, vmf->address, vmf->pte);
  2196. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2197. }
  2198. /*
  2199. * Handle the case of a page which we actually need to copy to a new page.
  2200. *
  2201. * Called with mmap_sem locked and the old page referenced, but
  2202. * without the ptl held.
  2203. *
  2204. * High level logic flow:
  2205. *
  2206. * - Allocate a page, copy the content of the old page to the new one.
  2207. * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
  2208. * - Take the PTL. If the pte changed, bail out and release the allocated page
  2209. * - If the pte is still the way we remember it, update the page table and all
  2210. * relevant references. This includes dropping the reference the page-table
  2211. * held to the old page, as well as updating the rmap.
  2212. * - In any case, unlock the PTL and drop the reference we took to the old page.
  2213. */
  2214. static int wp_page_copy(struct vm_fault *vmf)
  2215. {
  2216. struct vm_area_struct *vma = vmf->vma;
  2217. struct mm_struct *mm = vma->vm_mm;
  2218. struct page *old_page = vmf->page;
  2219. struct page *new_page = NULL;
  2220. pte_t entry;
  2221. int page_copied = 0;
  2222. const unsigned long mmun_start = vmf->address & PAGE_MASK;
  2223. const unsigned long mmun_end = mmun_start + PAGE_SIZE;
  2224. struct mem_cgroup *memcg;
  2225. if (unlikely(anon_vma_prepare(vma)))
  2226. goto oom;
  2227. if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
  2228. new_page = alloc_zeroed_user_highpage_movable(vma,
  2229. vmf->address);
  2230. if (!new_page)
  2231. goto oom;
  2232. } else {
  2233. new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
  2234. vmf->address);
  2235. if (!new_page)
  2236. goto oom;
  2237. cow_user_page(new_page, old_page, vmf->address, vma);
  2238. }
  2239. if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
  2240. goto oom_free_new;
  2241. __SetPageUptodate(new_page);
  2242. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  2243. /*
  2244. * Re-check the pte - we dropped the lock
  2245. */
  2246. vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
  2247. if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
  2248. if (old_page) {
  2249. if (!PageAnon(old_page)) {
  2250. dec_mm_counter_fast(mm,
  2251. mm_counter_file(old_page));
  2252. inc_mm_counter_fast(mm, MM_ANONPAGES);
  2253. }
  2254. } else {
  2255. inc_mm_counter_fast(mm, MM_ANONPAGES);
  2256. }
  2257. flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
  2258. entry = mk_pte(new_page, vma->vm_page_prot);
  2259. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  2260. /*
  2261. * Clear the pte entry and flush it first, before updating the
  2262. * pte with the new entry. This will avoid a race condition
  2263. * seen in the presence of one thread doing SMC and another
  2264. * thread doing COW.
  2265. */
  2266. ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
  2267. page_add_new_anon_rmap(new_page, vma, vmf->address, false);
  2268. mem_cgroup_commit_charge(new_page, memcg, false, false);
  2269. lru_cache_add_active_or_unevictable(new_page, vma);
  2270. /*
  2271. * We call the notify macro here because, when using secondary
  2272. * mmu page tables (such as kvm shadow page tables), we want the
  2273. * new page to be mapped directly into the secondary page table.
  2274. */
  2275. set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
  2276. update_mmu_cache(vma, vmf->address, vmf->pte);
  2277. if (old_page) {
  2278. /*
  2279. * Only after switching the pte to the new page may
  2280. * we remove the mapcount here. Otherwise another
  2281. * process may come and find the rmap count decremented
  2282. * before the pte is switched to the new page, and
  2283. * "reuse" the old page writing into it while our pte
  2284. * here still points into it and can be read by other
  2285. * threads.
  2286. *
  2287. * The critical issue is to order this
2288. * page_remove_rmap with the ptep_clear_flush above.
  2289. * Those stores are ordered by (if nothing else,)
  2290. * the barrier present in the atomic_add_negative
  2291. * in page_remove_rmap.
  2292. *
  2293. * Then the TLB flush in ptep_clear_flush ensures that
  2294. * no process can access the old page before the
  2295. * decremented mapcount is visible. And the old page
  2296. * cannot be reused until after the decremented
  2297. * mapcount is visible. So transitively, TLBs to
  2298. * old page will be flushed before it can be reused.
  2299. */
  2300. page_remove_rmap(old_page, false);
  2301. }
  2302. /* Free the old page.. */
  2303. new_page = old_page;
  2304. page_copied = 1;
  2305. } else {
  2306. mem_cgroup_cancel_charge(new_page, memcg, false);
  2307. }
  2308. if (new_page)
  2309. put_page(new_page);
  2310. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2311. /*
  2312. * No need to double call mmu_notifier->invalidate_range() callback as
  2313. * the above ptep_clear_flush_notify() did already call it.
  2314. */
  2315. mmu_notifier_invalidate_range_only_end(mm, mmun_start, mmun_end);
  2316. if (old_page) {
  2317. /*
  2318. * Don't let another task, with possibly unlocked vma,
  2319. * keep the mlocked page.
  2320. */
  2321. if (page_copied && (vma->vm_flags & VM_LOCKED)) {
  2322. lock_page(old_page); /* LRU manipulation */
  2323. if (PageMlocked(old_page))
  2324. munlock_vma_page(old_page);
  2325. unlock_page(old_page);
  2326. }
  2327. put_page(old_page);
  2328. }
  2329. return page_copied ? VM_FAULT_WRITE : 0;
  2330. oom_free_new:
  2331. put_page(new_page);
  2332. oom:
  2333. if (old_page)
  2334. put_page(old_page);
  2335. return VM_FAULT_OOM;
  2336. }
  2337. /**
  2338. * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
  2339. * writeable once the page is prepared
  2340. *
  2341. * @vmf: structure describing the fault
  2342. *
  2343. * This function handles all that is needed to finish a write page fault in a
  2344. * shared mapping due to PTE being read-only once the mapped page is prepared.
2345. * It handles locking of PTE and modifying it. The function returns
2346. * 0 on success and VM_FAULT_NOPAGE when the PTE got changed before we
2347. * acquired the PTE lock.
  2348. *
  2349. * The function expects the page to be locked or other protection against
  2350. * concurrent faults / writeback (such as DAX radix tree locks).
  2351. */
  2352. int finish_mkwrite_fault(struct vm_fault *vmf)
  2353. {
  2354. WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
  2355. vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
  2356. &vmf->ptl);
  2357. /*
  2358. * We might have raced with another page fault while we released the
  2359. * pte_offset_map_lock.
  2360. */
  2361. if (!pte_same(*vmf->pte, vmf->orig_pte)) {
  2362. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2363. return VM_FAULT_NOPAGE;
  2364. }
  2365. wp_page_reuse(vmf);
  2366. return 0;
  2367. }
  2368. /*
  2369. * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
  2370. * mapping
  2371. */
  2372. static int wp_pfn_shared(struct vm_fault *vmf)
  2373. {
  2374. struct vm_area_struct *vma = vmf->vma;
  2375. if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
  2376. int ret;
  2377. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2378. vmf->flags |= FAULT_FLAG_MKWRITE;
  2379. ret = vma->vm_ops->pfn_mkwrite(vmf);
  2380. if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
  2381. return ret;
  2382. return finish_mkwrite_fault(vmf);
  2383. }
  2384. wp_page_reuse(vmf);
  2385. return VM_FAULT_WRITE;
  2386. }
  2387. static int wp_page_shared(struct vm_fault *vmf)
  2388. __releases(vmf->ptl)
  2389. {
  2390. struct vm_area_struct *vma = vmf->vma;
  2391. get_page(vmf->page);
  2392. if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
  2393. int tmp;
  2394. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2395. tmp = do_page_mkwrite(vmf);
  2396. if (unlikely(!tmp || (tmp &
  2397. (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
  2398. put_page(vmf->page);
  2399. return tmp;
  2400. }
  2401. tmp = finish_mkwrite_fault(vmf);
  2402. if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
  2403. unlock_page(vmf->page);
  2404. put_page(vmf->page);
  2405. return tmp;
  2406. }
  2407. } else {
  2408. wp_page_reuse(vmf);
  2409. lock_page(vmf->page);
  2410. }
  2411. fault_dirty_shared_page(vma, vmf->page);
  2412. put_page(vmf->page);
  2413. return VM_FAULT_WRITE;
  2414. }
  2415. /*
  2416. * This routine handles present pages, when users try to write
  2417. * to a shared page. It is done by copying the page to a new address
  2418. * and decrementing the shared-page counter for the old page.
  2419. *
  2420. * Note that this routine assumes that the protection checks have been
  2421. * done by the caller (the low-level page fault routine in most cases).
  2422. * Thus we can safely just mark it writable once we've done any necessary
  2423. * COW.
  2424. *
  2425. * We also mark the page dirty at this point even though the page will
  2426. * change only once the write actually happens. This avoids a few races,
  2427. * and potentially makes it more efficient.
  2428. *
  2429. * We enter with non-exclusive mmap_sem (to exclude vma changes,
  2430. * but allow concurrent faults), with pte both mapped and locked.
  2431. * We return with mmap_sem still held, but pte unmapped and unlocked.
  2432. */
  2433. static int do_wp_page(struct vm_fault *vmf)
  2434. __releases(vmf->ptl)
  2435. {
  2436. struct vm_area_struct *vma = vmf->vma;
  2437. vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
  2438. if (!vmf->page) {
  2439. /*
  2440. * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
  2441. * VM_PFNMAP VMA.
  2442. *
  2443. * We should not cow pages in a shared writeable mapping.
  2444. * Just mark the pages writable and/or call ops->pfn_mkwrite.
  2445. */
  2446. if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
  2447. (VM_WRITE|VM_SHARED))
  2448. return wp_pfn_shared(vmf);
  2449. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2450. return wp_page_copy(vmf);
  2451. }
  2452. /*
  2453. * Take out anonymous pages first, anonymous shared vmas are
  2454. * not dirty accountable.
  2455. */
  2456. if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
  2457. int total_map_swapcount;
  2458. if (!trylock_page(vmf->page)) {
  2459. get_page(vmf->page);
  2460. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2461. lock_page(vmf->page);
  2462. vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
  2463. vmf->address, &vmf->ptl);
  2464. if (!pte_same(*vmf->pte, vmf->orig_pte)) {
  2465. unlock_page(vmf->page);
  2466. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2467. put_page(vmf->page);
  2468. return 0;
  2469. }
  2470. put_page(vmf->page);
  2471. }
  2472. if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
  2473. if (total_map_swapcount == 1) {
  2474. /*
  2475. * The page is all ours. Move it to
  2476. * our anon_vma so the rmap code will
  2477. * not search our parent or siblings.
  2478. * Protected against the rmap code by
  2479. * the page lock.
  2480. */
  2481. page_move_anon_rmap(vmf->page, vma);
  2482. }
  2483. unlock_page(vmf->page);
  2484. wp_page_reuse(vmf);
  2485. return VM_FAULT_WRITE;
  2486. }
  2487. unlock_page(vmf->page);
  2488. } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
  2489. (VM_WRITE|VM_SHARED))) {
  2490. return wp_page_shared(vmf);
  2491. }
  2492. /*
  2493. * Ok, we need to copy. Oh, well..
  2494. */
  2495. get_page(vmf->page);
  2496. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2497. return wp_page_copy(vmf);
  2498. }
  2499. static void unmap_mapping_range_vma(struct vm_area_struct *vma,
  2500. unsigned long start_addr, unsigned long end_addr,
  2501. struct zap_details *details)
  2502. {
  2503. zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
  2504. }
  2505. static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
  2506. struct zap_details *details)
  2507. {
  2508. struct vm_area_struct *vma;
  2509. pgoff_t vba, vea, zba, zea;
  2510. vma_interval_tree_foreach(vma, root,
  2511. details->first_index, details->last_index) {
  2512. vba = vma->vm_pgoff;
  2513. vea = vba + vma_pages(vma) - 1;
  2514. zba = details->first_index;
  2515. if (zba < vba)
  2516. zba = vba;
  2517. zea = details->last_index;
  2518. if (zea > vea)
  2519. zea = vea;
  2520. unmap_mapping_range_vma(vma,
  2521. ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
  2522. ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
  2523. details);
  2524. }
  2525. }
  2526. /**
  2527. * unmap_mapping_pages() - Unmap pages from processes.
  2528. * @mapping: The address space containing pages to be unmapped.
  2529. * @start: Index of first page to be unmapped.
  2530. * @nr: Number of pages to be unmapped. 0 to unmap to end of file.
  2531. * @even_cows: Whether to unmap even private COWed pages.
  2532. *
  2533. * Unmap the pages in this address space from any userspace process which
  2534. * has them mmaped. Generally, you want to remove COWed pages as well when
  2535. * a file is being truncated, but not when invalidating pages from the page
  2536. * cache.
  2537. */
  2538. void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
  2539. pgoff_t nr, bool even_cows)
  2540. {
  2541. struct zap_details details = { };
  2542. details.check_mapping = even_cows ? NULL : mapping;
  2543. details.first_index = start;
  2544. details.last_index = start + nr - 1;
  2545. if (details.last_index < details.first_index)
  2546. details.last_index = ULONG_MAX;
  2547. i_mmap_lock_write(mapping);
  2548. if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
  2549. unmap_mapping_range_tree(&mapping->i_mmap, &details);
  2550. i_mmap_unlock_write(mapping);
  2551. }
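/*
 * Usage sketch, not part of this file: a filesystem invalidating part of its
 * page cache (without truncating) unmaps the shared mappings but leaves
 * private COWed copies alone; "my_invalidate_range" is a hypothetical name.
 */
static void my_invalidate_range(struct inode *inode, pgoff_t index, pgoff_t nr)
{
	unmap_mapping_pages(inode->i_mapping, index, nr, false);
}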
  2552. /**
  2553. * unmap_mapping_range - unmap the portion of all mmaps in the specified
  2554. * address_space corresponding to the specified byte range in the underlying
  2555. * file.
  2556. *
  2557. * @mapping: the address space containing mmaps to be unmapped.
  2558. * @holebegin: byte in first page to unmap, relative to the start of
  2559. * the underlying file. This will be rounded down to a PAGE_SIZE
  2560. * boundary. Note that this is different from truncate_pagecache(), which
  2561. * must keep the partial page. In contrast, we must get rid of
  2562. * partial pages.
  2563. * @holelen: size of prospective hole in bytes. This will be rounded
  2564. * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
  2565. * end of the file.
  2566. * @even_cows: 1 when truncating a file, unmap even private COWed pages;
  2567. * but 0 when invalidating pagecache, don't throw away private data.
  2568. */
  2569. void unmap_mapping_range(struct address_space *mapping,
  2570. loff_t const holebegin, loff_t const holelen, int even_cows)
  2571. {
  2572. pgoff_t hba = holebegin >> PAGE_SHIFT;
  2573. pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
  2574. /* Check for overflow. */
  2575. if (sizeof(holelen) > sizeof(hlen)) {
  2576. long long holeend =
  2577. (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
  2578. if (holeend & ~(long long)ULONG_MAX)
  2579. hlen = ULONG_MAX - hba + 1;
  2580. }
  2581. unmap_mapping_pages(mapping, hba, hlen, even_cows);
  2582. }
  2583. EXPORT_SYMBOL(unmap_mapping_range);
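/*
 * Usage sketch, not part of this file: on truncation the byte-based
 * interface is used with holelen == 0 ("to end of file") and even_cows == 1,
 * much as truncate_pagecache() does; "my_truncate_mappings" is a
 * hypothetical name.
 */
static void my_truncate_mappings(struct inode *inode, loff_t newsize)
{
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
}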
  2584. /*
  2585. * We enter with non-exclusive mmap_sem (to exclude vma changes,
  2586. * but allow concurrent faults), and pte mapped but not yet locked.
  2587. * We return with pte unmapped and unlocked.
  2588. *
  2589. * We return with the mmap_sem locked or unlocked in the same cases
  2590. * as does filemap_fault().
  2591. */
  2592. int do_swap_page(struct vm_fault *vmf)
  2593. {
  2594. struct vm_area_struct *vma = vmf->vma;
  2595. struct page *page = NULL, *swapcache;
  2596. struct mem_cgroup *memcg;
  2597. swp_entry_t entry;
  2598. pte_t pte;
  2599. int locked;
  2600. int exclusive = 0;
  2601. int ret = 0;
  2602. if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
  2603. goto out;
  2604. entry = pte_to_swp_entry(vmf->orig_pte);
  2605. if (unlikely(non_swap_entry(entry))) {
  2606. if (is_migration_entry(entry)) {
  2607. migration_entry_wait(vma->vm_mm, vmf->pmd,
  2608. vmf->address);
  2609. } else if (is_device_private_entry(entry)) {
  2610. /*
  2611. * For un-addressable device memory we call the pgmap
  2612. * fault handler callback. The callback must migrate
  2613. * the page back to some CPU accessible page.
  2614. */
  2615. ret = device_private_entry_fault(vma, vmf->address, entry,
  2616. vmf->flags, vmf->pmd);
  2617. } else if (is_hwpoison_entry(entry)) {
  2618. ret = VM_FAULT_HWPOISON;
  2619. } else {
  2620. print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
  2621. ret = VM_FAULT_SIGBUS;
  2622. }
  2623. goto out;
  2624. }
  2625. delayacct_set_flag(DELAYACCT_PF_SWAPIN);
  2626. page = lookup_swap_cache(entry, vma, vmf->address);
  2627. swapcache = page;
  2628. if (!page) {
  2629. struct swap_info_struct *si = swp_swap_info(entry);
  2630. if (si->flags & SWP_SYNCHRONOUS_IO &&
  2631. __swap_count(si, entry) == 1) {
  2632. /* skip swapcache */
  2633. page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
  2634. vmf->address);
  2635. if (page) {
  2636. __SetPageLocked(page);
  2637. __SetPageSwapBacked(page);
  2638. set_page_private(page, entry.val);
  2639. lru_cache_add_anon(page);
  2640. swap_readpage(page, true);
  2641. }
  2642. } else {
  2643. page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
  2644. vmf);
  2645. swapcache = page;
  2646. }
  2647. if (!page) {
  2648. /*
  2649. * Back out if somebody else faulted in this pte
  2650. * while we released the pte lock.
  2651. */
  2652. vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
  2653. vmf->address, &vmf->ptl);
  2654. if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
  2655. ret = VM_FAULT_OOM;
  2656. delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
  2657. goto unlock;
  2658. }
  2659. /* Had to read the page from swap area: Major fault */
  2660. ret = VM_FAULT_MAJOR;
  2661. count_vm_event(PGMAJFAULT);
  2662. count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
  2663. } else if (PageHWPoison(page)) {
  2664. /*
  2665. * hwpoisoned dirty swapcache pages are kept for killing
  2666. * owner processes (which may be unknown at hwpoison time)
  2667. */
  2668. ret = VM_FAULT_HWPOISON;
  2669. delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
  2670. goto out_release;
  2671. }
  2672. locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
  2673. delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
  2674. if (!locked) {
  2675. ret |= VM_FAULT_RETRY;
  2676. goto out_release;
  2677. }
  2678. /*
  2679. * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
  2680. * release the swapcache from under us. The page pin, and pte_same
  2681. * test below, are not enough to exclude that. Even if it is still
  2682. * swapcache, we need to check that the page's swap has not changed.
  2683. */
  2684. if (unlikely((!PageSwapCache(page) ||
  2685. page_private(page) != entry.val)) && swapcache)
  2686. goto out_page;
  2687. page = ksm_might_need_to_copy(page, vma, vmf->address);
  2688. if (unlikely(!page)) {
  2689. ret = VM_FAULT_OOM;
  2690. page = swapcache;
  2691. goto out_page;
  2692. }
  2693. if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL,
  2694. &memcg, false)) {
  2695. ret = VM_FAULT_OOM;
  2696. goto out_page;
  2697. }
  2698. /*
  2699. * Back out if somebody else already faulted in this pte.
  2700. */
  2701. vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
  2702. &vmf->ptl);
  2703. if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
  2704. goto out_nomap;
  2705. if (unlikely(!PageUptodate(page))) {
  2706. ret = VM_FAULT_SIGBUS;
  2707. goto out_nomap;
  2708. }
  2709. /*
  2710. * The page isn't present yet, go ahead with the fault.
  2711. *
  2712. * Be careful about the sequence of operations here.
  2713. * To get its accounting right, reuse_swap_page() must be called
  2714. * while the page is counted on swap but not yet in mapcount i.e.
  2715. * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
  2716. * must be called after the swap_free(), or it will never succeed.
  2717. */
  2718. inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
  2719. dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
  2720. pte = mk_pte(page, vma->vm_page_prot);
  2721. if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
  2722. pte = maybe_mkwrite(pte_mkdirty(pte), vma);
  2723. vmf->flags &= ~FAULT_FLAG_WRITE;
  2724. ret |= VM_FAULT_WRITE;
  2725. exclusive = RMAP_EXCLUSIVE;
  2726. }
  2727. flush_icache_page(vma, page);
  2728. if (pte_swp_soft_dirty(vmf->orig_pte))
  2729. pte = pte_mksoft_dirty(pte);
  2730. set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
  2731. arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
  2732. vmf->orig_pte = pte;
  2733. /* ksm created a completely new copy */
  2734. if (unlikely(page != swapcache && swapcache)) {
  2735. page_add_new_anon_rmap(page, vma, vmf->address, false);
  2736. mem_cgroup_commit_charge(page, memcg, false, false);
  2737. lru_cache_add_active_or_unevictable(page, vma);
  2738. } else {
  2739. do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
  2740. mem_cgroup_commit_charge(page, memcg, true, false);
  2741. activate_page(page);
  2742. }
  2743. swap_free(entry);
  2744. if (mem_cgroup_swap_full(page) ||
  2745. (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
  2746. try_to_free_swap(page);
  2747. unlock_page(page);
  2748. if (page != swapcache && swapcache) {
  2749. /*
2750. * Hold the lock to keep the swap entry from being reused
  2751. * until we take the PT lock for the pte_same() check
  2752. * (to avoid false positives from pte_same). For
  2753. * further safety release the lock after the swap_free
  2754. * so that the swap count won't change under a
  2755. * parallel locked swapcache.
  2756. */
  2757. unlock_page(swapcache);
  2758. put_page(swapcache);
  2759. }
  2760. if (vmf->flags & FAULT_FLAG_WRITE) {
  2761. ret |= do_wp_page(vmf);
  2762. if (ret & VM_FAULT_ERROR)
  2763. ret &= VM_FAULT_ERROR;
  2764. goto out;
  2765. }
  2766. /* No need to invalidate - it was non-present before */
  2767. update_mmu_cache(vma, vmf->address, vmf->pte);
  2768. unlock:
  2769. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2770. out:
  2771. return ret;
  2772. out_nomap:
  2773. mem_cgroup_cancel_charge(page, memcg, false);
  2774. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2775. out_page:
  2776. unlock_page(page);
  2777. out_release:
  2778. put_page(page);
  2779. if (page != swapcache && swapcache) {
  2780. unlock_page(swapcache);
  2781. put_page(swapcache);
  2782. }
  2783. return ret;
  2784. }
  2785. /*
  2786. * We enter with non-exclusive mmap_sem (to exclude vma changes,
  2787. * but allow concurrent faults), and pte mapped but not yet locked.
  2788. * We return with mmap_sem still held, but pte unmapped and unlocked.
  2789. */
  2790. static int do_anonymous_page(struct vm_fault *vmf)
  2791. {
  2792. struct vm_area_struct *vma = vmf->vma;
  2793. struct mem_cgroup *memcg;
  2794. struct page *page;
  2795. int ret = 0;
  2796. pte_t entry;
  2797. /* File mapping without ->vm_ops ? */
  2798. if (vma->vm_flags & VM_SHARED)
  2799. return VM_FAULT_SIGBUS;
  2800. /*
  2801. * Use pte_alloc() instead of pte_alloc_map(). We can't run
  2802. * pte_offset_map() on pmds where a huge pmd might be created
  2803. * from a different thread.
  2804. *
  2805. * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
  2806. * parallel threads are excluded by other means.
  2807. *
  2808. * Here we only have down_read(mmap_sem).
  2809. */
  2810. if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
  2811. return VM_FAULT_OOM;
  2812. /* See the comment in pte_alloc_one_map() */
  2813. if (unlikely(pmd_trans_unstable(vmf->pmd)))
  2814. return 0;
  2815. /* Use the zero-page for reads */
  2816. if (!(vmf->flags & FAULT_FLAG_WRITE) &&
  2817. !mm_forbids_zeropage(vma->vm_mm)) {
  2818. entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
  2819. vma->vm_page_prot));
  2820. vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
  2821. vmf->address, &vmf->ptl);
  2822. if (!pte_none(*vmf->pte))
  2823. goto unlock;
  2824. ret = check_stable_address_space(vma->vm_mm);
  2825. if (ret)
  2826. goto unlock;
  2827. /* Deliver the page fault to userland, check inside PT lock */
  2828. if (userfaultfd_missing(vma)) {
  2829. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2830. return handle_userfault(vmf, VM_UFFD_MISSING);
  2831. }
  2832. goto setpte;
  2833. }
  2834. /* Allocate our own private page. */
  2835. if (unlikely(anon_vma_prepare(vma)))
  2836. goto oom;
  2837. page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
  2838. if (!page)
  2839. goto oom;
  2840. if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg,
  2841. false))
  2842. goto oom_free_page;
  2843. /*
  2844. * The memory barrier inside __SetPageUptodate makes sure that
2845. * preceding stores to the page contents become visible before
  2846. * the set_pte_at() write.
  2847. */
  2848. __SetPageUptodate(page);
  2849. entry = mk_pte(page, vma->vm_page_prot);
  2850. if (vma->vm_flags & VM_WRITE)
  2851. entry = pte_mkwrite(pte_mkdirty(entry));
  2852. vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
  2853. &vmf->ptl);
  2854. if (!pte_none(*vmf->pte))
  2855. goto release;
  2856. ret = check_stable_address_space(vma->vm_mm);
  2857. if (ret)
  2858. goto release;
  2859. /* Deliver the page fault to userland, check inside PT lock */
  2860. if (userfaultfd_missing(vma)) {
  2861. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2862. mem_cgroup_cancel_charge(page, memcg, false);
  2863. put_page(page);
  2864. return handle_userfault(vmf, VM_UFFD_MISSING);
  2865. }
  2866. inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
  2867. page_add_new_anon_rmap(page, vma, vmf->address, false);
  2868. mem_cgroup_commit_charge(page, memcg, false, false);
  2869. lru_cache_add_active_or_unevictable(page, vma);
  2870. setpte:
  2871. set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
  2872. /* No need to invalidate - it was non-present before */
  2873. update_mmu_cache(vma, vmf->address, vmf->pte);
  2874. unlock:
  2875. pte_unmap_unlock(vmf->pte, vmf->ptl);
  2876. return ret;
  2877. release:
  2878. mem_cgroup_cancel_charge(page, memcg, false);
  2879. put_page(page);
  2880. goto unlock;
  2881. oom_free_page:
  2882. put_page(page);
  2883. oom:
  2884. return VM_FAULT_OOM;
  2885. }
  2886. /*
  2887. * The mmap_sem must have been held on entry, and may have been
  2888. * released depending on flags and vma->vm_ops->fault() return value.
2889. * See filemap_fault() and __lock_page_or_retry().
  2890. */
  2891. static int __do_fault(struct vm_fault *vmf)
  2892. {
  2893. struct vm_area_struct *vma = vmf->vma;
  2894. int ret;
  2895. ret = vma->vm_ops->fault(vmf);
  2896. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
  2897. VM_FAULT_DONE_COW)))
  2898. return ret;
  2899. if (unlikely(PageHWPoison(vmf->page))) {
  2900. if (ret & VM_FAULT_LOCKED)
  2901. unlock_page(vmf->page);
  2902. put_page(vmf->page);
  2903. vmf->page = NULL;
  2904. return VM_FAULT_HWPOISON;
  2905. }
  2906. if (unlikely(!(ret & VM_FAULT_LOCKED)))
  2907. lock_page(vmf->page);
  2908. else
  2909. VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
  2910. return ret;
  2911. }
  2912. /*
  2913. * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
  2914. * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
  2915. * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
  2916. * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
  2917. */
  2918. static int pmd_devmap_trans_unstable(pmd_t *pmd)
  2919. {
  2920. return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
  2921. }
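/*
 * Make sure vmf->pmd points to a page table - populating it from
 * vmf->prealloc_pte or allocating a fresh one if necessary - and then map
 * and lock the pte for vmf->address into vmf->pte / vmf->ptl.  Returns
 * VM_FAULT_NOPAGE if a huge pmd materialized under us (retry the fault),
 * VM_FAULT_OOM if the page table allocation failed, 0 otherwise.
 */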
  2922. static int pte_alloc_one_map(struct vm_fault *vmf)
  2923. {
  2924. struct vm_area_struct *vma = vmf->vma;
  2925. if (!pmd_none(*vmf->pmd))
  2926. goto map_pte;
  2927. if (vmf->prealloc_pte) {
  2928. vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
  2929. if (unlikely(!pmd_none(*vmf->pmd))) {
  2930. spin_unlock(vmf->ptl);
  2931. goto map_pte;
  2932. }
  2933. mm_inc_nr_ptes(vma->vm_mm);
  2934. pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
  2935. spin_unlock(vmf->ptl);
  2936. vmf->prealloc_pte = NULL;
  2937. } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) {
  2938. return VM_FAULT_OOM;
  2939. }
  2940. map_pte:
  2941. /*
  2942. * If a huge pmd materialized under us just retry later. Use
  2943. * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
  2944. * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
  2945. * under us and then back to pmd_none, as a result of MADV_DONTNEED
  2946. * running immediately after a huge pmd fault in a different thread of
  2947. * this mm, in turn leading to a misleading pmd_trans_huge() retval.
  2948. * All we have to ensure is that it is a regular pmd that we can walk
  2949. * with pte_offset_map() and we can do that through an atomic read in
  2950. * C, which is what pmd_trans_unstable() provides.
  2951. */
  2952. if (pmd_devmap_trans_unstable(vmf->pmd))
  2953. return VM_FAULT_NOPAGE;
  2954. /*
  2955. * At this point we know that our vmf->pmd points to a page of ptes
  2956. * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
  2957. * for the duration of the fault. If a racing MADV_DONTNEED runs and
  2958. * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
  2959. * be valid and we will re-check to make sure the vmf->pte isn't
  2960. * pte_none() under vmf->ptl protection when we return to
  2961. * alloc_set_pte().
  2962. */
  2963. vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
  2964. &vmf->ptl);
  2965. return 0;
  2966. }
  2967. #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
  2968. #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
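/*
 * True if a pmd-sized page can be mapped at @haddr in @vma: the VMA's file
 * offset must be huge-page aligned relative to its start address, and the
 * whole HPAGE_PMD_SIZE range starting at @haddr must lie inside the VMA.
 */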
  2969. static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
  2970. unsigned long haddr)
  2971. {
  2972. if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
  2973. (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
  2974. return false;
  2975. if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
  2976. return false;
  2977. return true;
  2978. }
  2979. static void deposit_prealloc_pte(struct vm_fault *vmf)
  2980. {
  2981. struct vm_area_struct *vma = vmf->vma;
  2982. pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
  2983. /*
  2984. * We are going to consume the prealloc table,
  2985. * count that as nr_ptes.
  2986. */
  2987. mm_inc_nr_ptes(vma->vm_mm);
  2988. vmf->prealloc_pte = NULL;
  2989. }
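/*
 * Try to map the compound page containing @page with a single huge pmd at
 * the fault address.  Returns VM_FAULT_FALLBACK when the VMA or the pmd is
 * not suitable, telling the caller to fall back to mapping individual ptes.
 */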
  2990. static int do_set_pmd(struct vm_fault *vmf, struct page *page)
  2991. {
  2992. struct vm_area_struct *vma = vmf->vma;
  2993. bool write = vmf->flags & FAULT_FLAG_WRITE;
  2994. unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
  2995. pmd_t entry;
  2996. int i, ret;
  2997. if (!transhuge_vma_suitable(vma, haddr))
  2998. return VM_FAULT_FALLBACK;
  2999. ret = VM_FAULT_FALLBACK;
  3000. page = compound_head(page);
  3001. /*
3002. * Archs like ppc64 need additional space to store information
  3003. * related to pte entry. Use the preallocated table for that.
  3004. */
  3005. if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
  3006. vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address);
  3007. if (!vmf->prealloc_pte)
  3008. return VM_FAULT_OOM;
  3009. smp_wmb(); /* See comment in __pte_alloc() */
  3010. }
  3011. vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
  3012. if (unlikely(!pmd_none(*vmf->pmd)))
  3013. goto out;
  3014. for (i = 0; i < HPAGE_PMD_NR; i++)
  3015. flush_icache_page(vma, page + i);
  3016. entry = mk_huge_pmd(page, vma->vm_page_prot);
  3017. if (write)
  3018. entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
  3019. add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
  3020. page_add_file_rmap(page, true);
  3021. /*
  3022. * deposit and withdraw with pmd lock held
  3023. */
  3024. if (arch_needs_pgtable_deposit())
  3025. deposit_prealloc_pte(vmf);
  3026. set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
  3027. update_mmu_cache_pmd(vma, haddr, vmf->pmd);
  3028. /* fault is handled */
  3029. ret = 0;
  3030. count_vm_event(THP_FILE_MAPPED);
  3031. out:
  3032. spin_unlock(vmf->ptl);
  3033. return ret;
  3034. }
  3035. #else
  3036. static int do_set_pmd(struct vm_fault *vmf, struct page *page)
  3037. {
  3038. BUILD_BUG();
  3039. return 0;
  3040. }
  3041. #endif
  3042. /**
  3043. * alloc_set_pte - setup new PTE entry for given page and add reverse page
3044. * mapping. If needed, the function allocates a page table or uses the pre-allocated one.
  3045. *
  3046. * @vmf: fault environment
  3047. * @memcg: memcg to charge page (only for private mappings)
  3048. * @page: page to map
  3049. *
  3050. * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
  3051. * return.
  3052. *
3053. * Target users are the page fault handler itself and implementations of
  3054. * vm_ops->map_pages.
  3055. */
  3056. int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
  3057. struct page *page)
  3058. {
  3059. struct vm_area_struct *vma = vmf->vma;
  3060. bool write = vmf->flags & FAULT_FLAG_WRITE;
  3061. pte_t entry;
  3062. int ret;
  3063. if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
  3064. IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
  3065. /* THP on COW? */
  3066. VM_BUG_ON_PAGE(memcg, page);
  3067. ret = do_set_pmd(vmf, page);
  3068. if (ret != VM_FAULT_FALLBACK)
  3069. return ret;
  3070. }
  3071. if (!vmf->pte) {
  3072. ret = pte_alloc_one_map(vmf);
  3073. if (ret)
  3074. return ret;
  3075. }
  3076. /* Re-check under ptl */
  3077. if (unlikely(!pte_none(*vmf->pte)))
  3078. return VM_FAULT_NOPAGE;
  3079. flush_icache_page(vma, page);
  3080. entry = mk_pte(page, vma->vm_page_prot);
  3081. if (write)
  3082. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  3083. /* copy-on-write page */
  3084. if (write && !(vma->vm_flags & VM_SHARED)) {
  3085. inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
  3086. page_add_new_anon_rmap(page, vma, vmf->address, false);
  3087. mem_cgroup_commit_charge(page, memcg, false, false);
  3088. lru_cache_add_active_or_unevictable(page, vma);
  3089. } else {
  3090. inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
  3091. page_add_file_rmap(page, false);
  3092. }
  3093. set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
  3094. /* no need to invalidate: a not-present page won't be cached */
  3095. update_mmu_cache(vma, vmf->address, vmf->pte);
  3096. return 0;
  3097. }
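/*
 * Illustrative sketch only, not used in this file: the core of a
 * vm_ops->map_pages() style caller of alloc_set_pte().  The page is assumed
 * to be locked, uptodate and referenced by the caller; on success the
 * reference is consumed by the new mapping, on failure the caller keeps it
 * and drops it here.  Unlocking vmf->ptl is left to the caller, see
 * finish_fault().
 */
static int __maybe_unused example_map_locked_page(struct vm_fault *vmf,
					struct page *page)
{
	int ret;

	/* NULL memcg: this is a shared/file page, not a private COW copy */
	ret = alloc_set_pte(vmf, NULL, page);
	unlock_page(page);
	if (ret)
		put_page(page);
	return ret;
}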
  3098. /**
  3099. * finish_fault - finish page fault once we have prepared the page to fault
  3100. *
  3101. * @vmf: structure describing the fault
  3102. *
  3103. * This function handles all that is needed to finish a page fault once the
  3104. * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
  3105. * given page, adds reverse page mapping, handles memcg charges and LRU
  3106. * addition. The function returns 0 on success, VM_FAULT_ code in case of
  3107. * error.
  3108. *
  3109. * The function expects the page to be locked and on success it consumes a
  3110. * reference of a page being mapped (for the PTE which maps it).
  3111. */
  3112. int finish_fault(struct vm_fault *vmf)
  3113. {
  3114. struct page *page;
  3115. int ret = 0;
  3116. /* Did we COW the page? */
  3117. if ((vmf->flags & FAULT_FLAG_WRITE) &&
  3118. !(vmf->vma->vm_flags & VM_SHARED))
  3119. page = vmf->cow_page;
  3120. else
  3121. page = vmf->page;
  3122. /*
  3123. * check even for read faults because we might have lost our CoWed
  3124. * page
  3125. */
  3126. if (!(vmf->vma->vm_flags & VM_SHARED))
  3127. ret = check_stable_address_space(vmf->vma->vm_mm);
  3128. if (!ret)
  3129. ret = alloc_set_pte(vmf, vmf->memcg, page);
  3130. if (vmf->pte)
  3131. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3132. return ret;
  3133. }
  3134. static unsigned long fault_around_bytes __read_mostly =
  3135. rounddown_pow_of_two(65536);
  3136. #ifdef CONFIG_DEBUG_FS
  3137. static int fault_around_bytes_get(void *data, u64 *val)
  3138. {
  3139. *val = fault_around_bytes;
  3140. return 0;
  3141. }
  3142. /*
  3143. * fault_around_bytes must be rounded down to the nearest page order as it's
  3144. * what do_fault_around() expects to see.
  3145. */
  3146. static int fault_around_bytes_set(void *data, u64 val)
  3147. {
  3148. if (val / PAGE_SIZE > PTRS_PER_PTE)
  3149. return -EINVAL;
  3150. if (val > PAGE_SIZE)
  3151. fault_around_bytes = rounddown_pow_of_two(val);
  3152. else
  3153. fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
  3154. return 0;
  3155. }
  3156. DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
  3157. fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
  3158. static int __init fault_around_debugfs(void)
  3159. {
  3160. void *ret;
  3161. ret = debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
  3162. &fault_around_bytes_fops);
  3163. if (!ret)
  3164. pr_warn("Failed to create fault_around_bytes in debugfs");
  3165. return 0;
  3166. }
  3167. late_initcall(fault_around_debugfs);
  3168. #endif
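/*
 * With CONFIG_DEBUG_FS and debugfs mounted in its usual place, the
 * fault-around window can be inspected and tuned at run time, e.g.:
 *
 *	cat /sys/kernel/debug/fault_around_bytes
 *	echo 16384 > /sys/kernel/debug/fault_around_bytes
 *
 * Writes are rounded down to a power of two (at least PAGE_SIZE) by
 * fault_around_bytes_set() above.
 */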
  3169. /*
3170. * do_fault_around() tries to map a few pages around the fault address. The hope
  3171. * is that the pages will be needed soon and this will lower the number of
  3172. * faults to handle.
  3173. *
  3174. * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
  3175. * not ready to be mapped: not up-to-date, locked, etc.
  3176. *
  3177. * This function is called with the page table lock taken. In the split ptlock
3178. * case the page table lock protects only those entries which belong to
  3179. * the page table corresponding to the fault address.
  3180. *
  3181. * This function doesn't cross the VMA boundaries, in order to call map_pages()
  3182. * only once.
  3183. *
  3184. * fault_around_bytes defines how many bytes we'll try to map.
3185. * do_fault_around() expects it to be set to a power of two which, counted in
3186. * pages, is less than or equal to PTRS_PER_PTE.
  3187. *
  3188. * The virtual address of the area that we map is naturally aligned to
  3189. * fault_around_bytes rounded down to the machine page size
  3190. * (and therefore to page order). This way it's easier to guarantee
  3191. * that we don't cross page table boundaries.
  3192. */
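/*
 * Worked example (illustrative, assuming 4K pages and the default
 * fault_around_bytes of 65536): nr_pages is 16, so a read fault at
 * 0x12345000 in a VMA that covers the whole range rewinds vmf->address to
 * 0x12340000 and asks ->map_pages() to populate the 16 ptes covering
 * 0x12340000-0x1234ffff, clipped so that the range never leaves the
 * faulting VMA or the current page table.
 */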
  3193. static int do_fault_around(struct vm_fault *vmf)
  3194. {
  3195. unsigned long address = vmf->address, nr_pages, mask;
  3196. pgoff_t start_pgoff = vmf->pgoff;
  3197. pgoff_t end_pgoff;
  3198. int off, ret = 0;
  3199. nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
  3200. mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
  3201. vmf->address = max(address & mask, vmf->vma->vm_start);
  3202. off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
  3203. start_pgoff -= off;
  3204. /*
  3205. * end_pgoff is either the end of the page table, the end of
  3206. * the vma or nr_pages from start_pgoff, depending what is nearest.
  3207. */
  3208. end_pgoff = start_pgoff -
  3209. ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
  3210. PTRS_PER_PTE - 1;
  3211. end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
  3212. start_pgoff + nr_pages - 1);
  3213. if (pmd_none(*vmf->pmd)) {
  3214. vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
  3215. vmf->address);
  3216. if (!vmf->prealloc_pte)
  3217. goto out;
  3218. smp_wmb(); /* See comment in __pte_alloc() */
  3219. }
  3220. vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
  3221. /* Huge page is mapped? Page fault is solved */
  3222. if (pmd_trans_huge(*vmf->pmd)) {
  3223. ret = VM_FAULT_NOPAGE;
  3224. goto out;
  3225. }
3226. /* ->map_pages() hasn't done anything useful. Cold page cache? */
  3227. if (!vmf->pte)
  3228. goto out;
  3229. /* check if the page fault is solved */
  3230. vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
  3231. if (!pte_none(*vmf->pte))
  3232. ret = VM_FAULT_NOPAGE;
  3233. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3234. out:
  3235. vmf->address = address;
  3236. vmf->pte = NULL;
  3237. return ret;
  3238. }
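/*
 * Read fault on a VMA with ->fault(): try ->map_pages() first to fault
 * around the address from the page cache, and fall back to a single
 * ->fault() call if that did not resolve the faulting pte.
 */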
  3239. static int do_read_fault(struct vm_fault *vmf)
  3240. {
  3241. struct vm_area_struct *vma = vmf->vma;
  3242. int ret = 0;
  3243. /*
  3244. * Let's call ->map_pages() first and use ->fault() as fallback
3245. * if the page at this offset is not ready to be mapped (cold cache or
  3246. * something).
  3247. */
  3248. if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
  3249. ret = do_fault_around(vmf);
  3250. if (ret)
  3251. return ret;
  3252. }
  3253. ret = __do_fault(vmf);
  3254. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  3255. return ret;
  3256. ret |= finish_fault(vmf);
  3257. unlock_page(vmf->page);
  3258. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  3259. put_page(vmf->page);
  3260. return ret;
  3261. }
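/*
 * Write fault on a private file mapping: allocate an anonymous page, read
 * the backing page via ->fault(), copy its contents into the new page and
 * map that private copy writable.
 */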
  3262. static int do_cow_fault(struct vm_fault *vmf)
  3263. {
  3264. struct vm_area_struct *vma = vmf->vma;
  3265. int ret;
  3266. if (unlikely(anon_vma_prepare(vma)))
  3267. return VM_FAULT_OOM;
  3268. vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
  3269. if (!vmf->cow_page)
  3270. return VM_FAULT_OOM;
  3271. if (mem_cgroup_try_charge_delay(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
  3272. &vmf->memcg, false)) {
  3273. put_page(vmf->cow_page);
  3274. return VM_FAULT_OOM;
  3275. }
  3276. ret = __do_fault(vmf);
  3277. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  3278. goto uncharge_out;
  3279. if (ret & VM_FAULT_DONE_COW)
  3280. return ret;
  3281. copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
  3282. __SetPageUptodate(vmf->cow_page);
  3283. ret |= finish_fault(vmf);
  3284. unlock_page(vmf->page);
  3285. put_page(vmf->page);
  3286. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  3287. goto uncharge_out;
  3288. return ret;
  3289. uncharge_out:
  3290. mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
  3291. put_page(vmf->cow_page);
  3292. return ret;
  3293. }
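/*
 * Write fault on a shared file mapping: fault the page in via ->fault(),
 * give the filesystem a chance to prepare it with ->page_mkwrite() if one
 * is provided, map it writable and then dirty the page
 * (fault_dirty_shared_page()).
 */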
  3294. static int do_shared_fault(struct vm_fault *vmf)
  3295. {
  3296. struct vm_area_struct *vma = vmf->vma;
  3297. int ret, tmp;
  3298. ret = __do_fault(vmf);
  3299. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  3300. return ret;
  3301. /*
  3302. * Check if the backing address space wants to know that the page is
  3303. * about to become writable
  3304. */
  3305. if (vma->vm_ops->page_mkwrite) {
  3306. unlock_page(vmf->page);
  3307. tmp = do_page_mkwrite(vmf);
  3308. if (unlikely(!tmp ||
  3309. (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
  3310. put_page(vmf->page);
  3311. return tmp;
  3312. }
  3313. }
  3314. ret |= finish_fault(vmf);
  3315. if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
  3316. VM_FAULT_RETRY))) {
  3317. unlock_page(vmf->page);
  3318. put_page(vmf->page);
  3319. return ret;
  3320. }
  3321. fault_dirty_shared_page(vma, vmf->page);
  3322. return ret;
  3323. }
  3324. /*
  3325. * We enter with non-exclusive mmap_sem (to exclude vma changes,
  3326. * but allow concurrent faults).
  3327. * The mmap_sem may have been released depending on flags and our
  3328. * return value. See filemap_fault() and __lock_page_or_retry().
  3329. */
  3330. static int do_fault(struct vm_fault *vmf)
  3331. {
  3332. struct vm_area_struct *vma = vmf->vma;
  3333. int ret;
  3334. /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
  3335. if (!vma->vm_ops->fault)
  3336. ret = VM_FAULT_SIGBUS;
  3337. else if (!(vmf->flags & FAULT_FLAG_WRITE))
  3338. ret = do_read_fault(vmf);
  3339. else if (!(vma->vm_flags & VM_SHARED))
  3340. ret = do_cow_fault(vmf);
  3341. else
  3342. ret = do_shared_fault(vmf);
  3343. /* preallocated pagetable is unused: free it */
  3344. if (vmf->prealloc_pte) {
  3345. pte_free(vma->vm_mm, vmf->prealloc_pte);
  3346. vmf->prealloc_pte = NULL;
  3347. }
  3348. return ret;
  3349. }
  3350. static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
  3351. unsigned long addr, int page_nid,
  3352. int *flags)
  3353. {
  3354. get_page(page);
  3355. count_vm_numa_event(NUMA_HINT_FAULTS);
  3356. if (page_nid == numa_node_id()) {
  3357. count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
  3358. *flags |= TNF_FAULT_LOCAL;
  3359. }
  3360. return mpol_misplaced(page, vma, addr);
  3361. }
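/*
 * Handle a NUMA hinting fault: the pte was made inaccessible (PROT_NONE
 * style) by the NUMA balancer.  Make it present again, then decide from
 * the memory policy and fault statistics whether to migrate the page
 * towards the faulting node.
 */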
  3362. static int do_numa_page(struct vm_fault *vmf)
  3363. {
  3364. struct vm_area_struct *vma = vmf->vma;
  3365. struct page *page = NULL;
  3366. int page_nid = -1;
  3367. int last_cpupid;
  3368. int target_nid;
  3369. bool migrated = false;
  3370. pte_t pte;
  3371. bool was_writable = pte_savedwrite(vmf->orig_pte);
  3372. int flags = 0;
  3373. /*
  3374. * The "pte" at this point cannot be used safely without
  3375. * validation through pte_unmap_same(). It's of NUMA type but
3376. * the pfn may be screwed if the read is non-atomic.
  3377. */
  3378. vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
  3379. spin_lock(vmf->ptl);
  3380. if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
  3381. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3382. goto out;
  3383. }
  3384. /*
3385. * Make it present again. Depending on how the arch implements
3386. * non-accessible ptes, some can allow access by kernel mode.
  3387. */
  3388. pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte);
  3389. pte = pte_modify(pte, vma->vm_page_prot);
  3390. pte = pte_mkyoung(pte);
  3391. if (was_writable)
  3392. pte = pte_mkwrite(pte);
  3393. ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte);
  3394. update_mmu_cache(vma, vmf->address, vmf->pte);
  3395. page = vm_normal_page(vma, vmf->address, pte);
  3396. if (!page) {
  3397. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3398. return 0;
  3399. }
  3400. /* TODO: handle PTE-mapped THP */
  3401. if (PageCompound(page)) {
  3402. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3403. return 0;
  3404. }
  3405. /*
  3406. * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
  3407. * much anyway since they can be in shared cache state. This misses
  3408. * the case where a mapping is writable but the process never writes
3409. * to it, yet pte_write gets cleared during protection updates and
  3410. * pte_dirty has unpredictable behaviour between PTE scan updates,
  3411. * background writeback, dirty balancing and application behaviour.
  3412. */
  3413. if (!pte_write(pte))
  3414. flags |= TNF_NO_GROUP;
  3415. /*
  3416. * Flag if the page is shared between multiple address spaces. This
  3417. * is later used when determining whether to group tasks together
  3418. */
  3419. if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
  3420. flags |= TNF_SHARED;
  3421. last_cpupid = page_cpupid_last(page);
  3422. page_nid = page_to_nid(page);
  3423. target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
  3424. &flags);
  3425. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3426. if (target_nid == -1) {
  3427. put_page(page);
  3428. goto out;
  3429. }
  3430. /* Migrate to the requested node */
  3431. migrated = migrate_misplaced_page(page, vma, target_nid);
  3432. if (migrated) {
  3433. page_nid = target_nid;
  3434. flags |= TNF_MIGRATED;
  3435. } else
  3436. flags |= TNF_MIGRATE_FAIL;
  3437. out:
  3438. if (page_nid != -1)
  3439. task_numa_fault(last_cpupid, page_nid, 1, flags);
  3440. return 0;
  3441. }
  3442. static inline int create_huge_pmd(struct vm_fault *vmf)
  3443. {
  3444. if (vma_is_anonymous(vmf->vma))
  3445. return do_huge_pmd_anonymous_page(vmf);
  3446. if (vmf->vma->vm_ops->huge_fault)
  3447. return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
  3448. return VM_FAULT_FALLBACK;
  3449. }
  3450. /* `inline' is required to avoid gcc 4.1.2 build error */
  3451. static inline int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
  3452. {
  3453. if (vma_is_anonymous(vmf->vma))
  3454. return do_huge_pmd_wp_page(vmf, orig_pmd);
  3455. if (vmf->vma->vm_ops->huge_fault)
  3456. return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
  3457. /* COW handled on pte level: split pmd */
  3458. VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
  3459. __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
  3460. return VM_FAULT_FALLBACK;
  3461. }
  3462. static inline bool vma_is_accessible(struct vm_area_struct *vma)
  3463. {
  3464. return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
  3465. }
  3466. static int create_huge_pud(struct vm_fault *vmf)
  3467. {
  3468. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  3469. /* No support for anonymous transparent PUD pages yet */
  3470. if (vma_is_anonymous(vmf->vma))
  3471. return VM_FAULT_FALLBACK;
  3472. if (vmf->vma->vm_ops->huge_fault)
  3473. return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
  3474. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  3475. return VM_FAULT_FALLBACK;
  3476. }
  3477. static int wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
  3478. {
  3479. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  3480. /* No support for anonymous transparent PUD pages yet */
  3481. if (vma_is_anonymous(vmf->vma))
  3482. return VM_FAULT_FALLBACK;
  3483. if (vmf->vma->vm_ops->huge_fault)
  3484. return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
  3485. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  3486. return VM_FAULT_FALLBACK;
  3487. }
  3488. /*
  3489. * These routines also need to handle stuff like marking pages dirty
  3490. * and/or accessed for architectures that don't do it in hardware (most
  3491. * RISC architectures). The early dirtying is also good on the i386.
  3492. *
  3493. * There is also a hook called "update_mmu_cache()" that architectures
  3494. * with external mmu caches can use to update those (ie the Sparc or
  3495. * PowerPC hashed page tables that act as extended TLBs).
  3496. *
  3497. * We enter with non-exclusive mmap_sem (to exclude vma changes, but allow
  3498. * concurrent faults).
  3499. *
  3500. * The mmap_sem may have been released depending on flags and our return value.
  3501. * See filemap_fault() and __lock_page_or_retry().
  3502. */
  3503. static int handle_pte_fault(struct vm_fault *vmf)
  3504. {
  3505. pte_t entry;
  3506. if (unlikely(pmd_none(*vmf->pmd))) {
  3507. /*
  3508. * Leave __pte_alloc() until later: because vm_ops->fault may
  3509. * want to allocate huge page, and if we expose page table
  3510. * for an instant, it will be difficult to retract from
  3511. * concurrent faults and from rmap lookups.
  3512. */
  3513. vmf->pte = NULL;
  3514. } else {
  3515. /* See comment in pte_alloc_one_map() */
  3516. if (pmd_devmap_trans_unstable(vmf->pmd))
  3517. return 0;
  3518. /*
  3519. * A regular pmd is established and it can't morph into a huge
  3520. * pmd from under us anymore at this point because we hold the
  3521. * mmap_sem read mode and khugepaged takes it in write mode.
  3522. * So now it's safe to run pte_offset_map().
  3523. */
  3524. vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
  3525. vmf->orig_pte = *vmf->pte;
  3526. /*
3527. * Some architectures can have larger ptes than wordsize,
3528. * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
  3529. * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
  3530. * accesses. The code below just needs a consistent view
  3531. * for the ifs and we later double check anyway with the
  3532. * ptl lock held. So here a barrier will do.
  3533. */
  3534. barrier();
  3535. if (pte_none(vmf->orig_pte)) {
  3536. pte_unmap(vmf->pte);
  3537. vmf->pte = NULL;
  3538. }
  3539. }
  3540. if (!vmf->pte) {
  3541. if (vma_is_anonymous(vmf->vma))
  3542. return do_anonymous_page(vmf);
  3543. else
  3544. return do_fault(vmf);
  3545. }
  3546. if (!pte_present(vmf->orig_pte))
  3547. return do_swap_page(vmf);
  3548. if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
  3549. return do_numa_page(vmf);
  3550. vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
  3551. spin_lock(vmf->ptl);
  3552. entry = vmf->orig_pte;
  3553. if (unlikely(!pte_same(*vmf->pte, entry)))
  3554. goto unlock;
  3555. if (vmf->flags & FAULT_FLAG_WRITE) {
  3556. if (!pte_write(entry))
  3557. return do_wp_page(vmf);
  3558. entry = pte_mkdirty(entry);
  3559. }
  3560. entry = pte_mkyoung(entry);
  3561. if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
  3562. vmf->flags & FAULT_FLAG_WRITE)) {
  3563. update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
  3564. } else {
  3565. /*
  3566. * This is needed only for protection faults but the arch code
  3567. * is not yet telling us if this is a protection fault or not.
  3568. * This still avoids useless tlb flushes for .text page faults
  3569. * with threads.
  3570. */
  3571. if (vmf->flags & FAULT_FLAG_WRITE)
  3572. flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
  3573. }
  3574. unlock:
  3575. pte_unmap_unlock(vmf->pte, vmf->ptl);
  3576. return 0;
  3577. }
  3578. /*
  3579. * By the time we get here, we already hold the mm semaphore
  3580. *
  3581. * The mmap_sem may have been released depending on flags and our
  3582. * return value. See filemap_fault() and __lock_page_or_retry().
  3583. */
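/*
 * Walk - allocating levels as needed - pgd, p4d, pud and pmd for @address,
 * dispatching huge PUD/PMD faults (creation, write protect, NUMA) along the
 * way, and finally hand the pte level to handle_pte_fault().
 */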
  3584. static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
  3585. unsigned int flags)
  3586. {
  3587. struct vm_fault vmf = {
  3588. .vma = vma,
  3589. .address = address & PAGE_MASK,
  3590. .flags = flags,
  3591. .pgoff = linear_page_index(vma, address),
  3592. .gfp_mask = __get_fault_gfp_mask(vma),
  3593. };
  3594. unsigned int dirty = flags & FAULT_FLAG_WRITE;
  3595. struct mm_struct *mm = vma->vm_mm;
  3596. pgd_t *pgd;
  3597. p4d_t *p4d;
  3598. int ret;
  3599. pgd = pgd_offset(mm, address);
  3600. p4d = p4d_alloc(mm, pgd, address);
  3601. if (!p4d)
  3602. return VM_FAULT_OOM;
  3603. vmf.pud = pud_alloc(mm, p4d, address);
  3604. if (!vmf.pud)
  3605. return VM_FAULT_OOM;
  3606. if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
  3607. ret = create_huge_pud(&vmf);
  3608. if (!(ret & VM_FAULT_FALLBACK))
  3609. return ret;
  3610. } else {
  3611. pud_t orig_pud = *vmf.pud;
  3612. barrier();
  3613. if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
  3614. /* NUMA case for anonymous PUDs would go here */
  3615. if (dirty && !pud_write(orig_pud)) {
  3616. ret = wp_huge_pud(&vmf, orig_pud);
  3617. if (!(ret & VM_FAULT_FALLBACK))
  3618. return ret;
  3619. } else {
  3620. huge_pud_set_accessed(&vmf, orig_pud);
  3621. return 0;
  3622. }
  3623. }
  3624. }
  3625. vmf.pmd = pmd_alloc(mm, vmf.pud, address);
  3626. if (!vmf.pmd)
  3627. return VM_FAULT_OOM;
  3628. if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
  3629. ret = create_huge_pmd(&vmf);
  3630. if (!(ret & VM_FAULT_FALLBACK))
  3631. return ret;
  3632. } else {
  3633. pmd_t orig_pmd = *vmf.pmd;
  3634. barrier();
  3635. if (unlikely(is_swap_pmd(orig_pmd))) {
  3636. VM_BUG_ON(thp_migration_supported() &&
  3637. !is_pmd_migration_entry(orig_pmd));
  3638. if (is_pmd_migration_entry(orig_pmd))
  3639. pmd_migration_entry_wait(mm, vmf.pmd);
  3640. return 0;
  3641. }
  3642. if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
  3643. if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
  3644. return do_huge_pmd_numa_page(&vmf, orig_pmd);
  3645. if (dirty && !pmd_write(orig_pmd)) {
  3646. ret = wp_huge_pmd(&vmf, orig_pmd);
  3647. if (!(ret & VM_FAULT_FALLBACK))
  3648. return ret;
  3649. } else {
  3650. huge_pmd_set_accessed(&vmf, orig_pmd);
  3651. return 0;
  3652. }
  3653. }
  3654. }
  3655. return handle_pte_fault(&vmf);
  3656. }
  3657. /*
  3658. * By the time we get here, we already hold the mm semaphore
  3659. *
  3660. * The mmap_sem may have been released depending on flags and our
  3661. * return value. See filemap_fault() and __lock_page_or_retry().
  3662. */
  3663. int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
  3664. unsigned int flags)
  3665. {
  3666. int ret;
  3667. __set_current_state(TASK_RUNNING);
  3668. count_vm_event(PGFAULT);
  3669. count_memcg_event_mm(vma->vm_mm, PGFAULT);
3670. /* do counter updates before entering the really critical section. */
  3671. check_sync_rss_stat(current);
  3672. if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
  3673. flags & FAULT_FLAG_INSTRUCTION,
  3674. flags & FAULT_FLAG_REMOTE))
  3675. return VM_FAULT_SIGSEGV;
  3676. /*
  3677. * Enable the memcg OOM handling for faults triggered in user
  3678. * space. Kernel faults are handled more gracefully.
  3679. */
  3680. if (flags & FAULT_FLAG_USER)
  3681. mem_cgroup_enter_user_fault();
  3682. if (unlikely(is_vm_hugetlb_page(vma)))
  3683. ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
  3684. else
  3685. ret = __handle_mm_fault(vma, address, flags);
  3686. if (flags & FAULT_FLAG_USER) {
  3687. mem_cgroup_exit_user_fault();
  3688. /*
  3689. * The task may have entered a memcg OOM situation but
  3690. * if the allocation error was handled gracefully (no
  3691. * VM_FAULT_OOM), there is no need to kill anything.
  3692. * Just clean up the OOM state peacefully.
  3693. */
  3694. if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
  3695. mem_cgroup_oom_synchronize(false);
  3696. }
  3697. return ret;
  3698. }
  3699. EXPORT_SYMBOL_GPL(handle_mm_fault);
  3700. #ifndef __PAGETABLE_P4D_FOLDED
  3701. /*
  3702. * Allocate p4d page table.
  3703. * We've already handled the fast-path in-line.
  3704. */
  3705. int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
  3706. {
  3707. p4d_t *new = p4d_alloc_one(mm, address);
  3708. if (!new)
  3709. return -ENOMEM;
  3710. smp_wmb(); /* See comment in __pte_alloc */
  3711. spin_lock(&mm->page_table_lock);
  3712. if (pgd_present(*pgd)) /* Another has populated it */
  3713. p4d_free(mm, new);
  3714. else
  3715. pgd_populate(mm, pgd, new);
  3716. spin_unlock(&mm->page_table_lock);
  3717. return 0;
  3718. }
  3719. #endif /* __PAGETABLE_P4D_FOLDED */
  3720. #ifndef __PAGETABLE_PUD_FOLDED
  3721. /*
  3722. * Allocate page upper directory.
  3723. * We've already handled the fast-path in-line.
  3724. */
  3725. int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
  3726. {
  3727. pud_t *new = pud_alloc_one(mm, address);
  3728. if (!new)
  3729. return -ENOMEM;
  3730. smp_wmb(); /* See comment in __pte_alloc */
  3731. spin_lock(&mm->page_table_lock);
  3732. #ifndef __ARCH_HAS_5LEVEL_HACK
  3733. if (!p4d_present(*p4d)) {
  3734. mm_inc_nr_puds(mm);
  3735. p4d_populate(mm, p4d, new);
  3736. } else /* Another has populated it */
  3737. pud_free(mm, new);
  3738. #else
  3739. if (!pgd_present(*p4d)) {
  3740. mm_inc_nr_puds(mm);
  3741. pgd_populate(mm, p4d, new);
  3742. } else /* Another has populated it */
  3743. pud_free(mm, new);
  3744. #endif /* __ARCH_HAS_5LEVEL_HACK */
  3745. spin_unlock(&mm->page_table_lock);
  3746. return 0;
  3747. }
  3748. #endif /* __PAGETABLE_PUD_FOLDED */
  3749. #ifndef __PAGETABLE_PMD_FOLDED
  3750. /*
  3751. * Allocate page middle directory.
  3752. * We've already handled the fast-path in-line.
  3753. */
  3754. int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
  3755. {
  3756. spinlock_t *ptl;
  3757. pmd_t *new = pmd_alloc_one(mm, address);
  3758. if (!new)
  3759. return -ENOMEM;
  3760. smp_wmb(); /* See comment in __pte_alloc */
  3761. ptl = pud_lock(mm, pud);
  3762. #ifndef __ARCH_HAS_4LEVEL_HACK
  3763. if (!pud_present(*pud)) {
  3764. mm_inc_nr_pmds(mm);
  3765. pud_populate(mm, pud, new);
  3766. } else /* Another has populated it */
  3767. pmd_free(mm, new);
  3768. #else
  3769. if (!pgd_present(*pud)) {
  3770. mm_inc_nr_pmds(mm);
  3771. pgd_populate(mm, pud, new);
  3772. } else /* Another has populated it */
  3773. pmd_free(mm, new);
  3774. #endif /* __ARCH_HAS_4LEVEL_HACK */
  3775. spin_unlock(ptl);
  3776. return 0;
  3777. }
  3778. #endif /* __PAGETABLE_PMD_FOLDED */
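/*
 * Walk the page tables down to the entry mapping @address.  On success the
 * pte (or, if @pmdpp is supplied, a huge pmd) is returned with its page
 * table lock held in *@ptlp; when @start/@end are passed in, an mmu
 * notifier invalidation range has been started and must be ended by the
 * caller.  Returns -EINVAL if no present entry is found.
 */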
  3779. static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
  3780. unsigned long *start, unsigned long *end,
  3781. pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
  3782. {
  3783. pgd_t *pgd;
  3784. p4d_t *p4d;
  3785. pud_t *pud;
  3786. pmd_t *pmd;
  3787. pte_t *ptep;
  3788. pgd = pgd_offset(mm, address);
  3789. if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
  3790. goto out;
  3791. p4d = p4d_offset(pgd, address);
  3792. if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
  3793. goto out;
  3794. pud = pud_offset(p4d, address);
  3795. if (pud_none(*pud) || unlikely(pud_bad(*pud)))
  3796. goto out;
  3797. pmd = pmd_offset(pud, address);
  3798. VM_BUG_ON(pmd_trans_huge(*pmd));
  3799. if (pmd_huge(*pmd)) {
  3800. if (!pmdpp)
  3801. goto out;
  3802. if (start && end) {
  3803. *start = address & PMD_MASK;
  3804. *end = *start + PMD_SIZE;
  3805. mmu_notifier_invalidate_range_start(mm, *start, *end);
  3806. }
  3807. *ptlp = pmd_lock(mm, pmd);
  3808. if (pmd_huge(*pmd)) {
  3809. *pmdpp = pmd;
  3810. return 0;
  3811. }
  3812. spin_unlock(*ptlp);
  3813. if (start && end)
  3814. mmu_notifier_invalidate_range_end(mm, *start, *end);
  3815. }
  3816. if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
  3817. goto out;
  3818. if (start && end) {
  3819. *start = address & PAGE_MASK;
  3820. *end = *start + PAGE_SIZE;
  3821. mmu_notifier_invalidate_range_start(mm, *start, *end);
  3822. }
  3823. ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
  3824. if (!pte_present(*ptep))
  3825. goto unlock;
  3826. *ptepp = ptep;
  3827. return 0;
  3828. unlock:
  3829. pte_unmap_unlock(ptep, *ptlp);
  3830. if (start && end)
  3831. mmu_notifier_invalidate_range_end(mm, *start, *end);
  3832. out:
  3833. return -EINVAL;
  3834. }
  3835. static inline int follow_pte(struct mm_struct *mm, unsigned long address,
  3836. pte_t **ptepp, spinlock_t **ptlp)
  3837. {
  3838. int res;
  3839. /* (void) is needed to make gcc happy */
  3840. (void) __cond_lock(*ptlp,
  3841. !(res = __follow_pte_pmd(mm, address, NULL, NULL,
  3842. ptepp, NULL, ptlp)));
  3843. return res;
  3844. }
  3845. int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
  3846. unsigned long *start, unsigned long *end,
  3847. pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
  3848. {
  3849. int res;
  3850. /* (void) is needed to make gcc happy */
  3851. (void) __cond_lock(*ptlp,
  3852. !(res = __follow_pte_pmd(mm, address, start, end,
  3853. ptepp, pmdpp, ptlp)));
  3854. return res;
  3855. }
  3856. EXPORT_SYMBOL(follow_pte_pmd);
  3857. /**
  3858. * follow_pfn - look up PFN at a user virtual address
  3859. * @vma: memory mapping
  3860. * @address: user virtual address
  3861. * @pfn: location to store found PFN
  3862. *
  3863. * Only IO mappings and raw PFN mappings are allowed.
  3864. *
  3865. * Returns zero and the pfn at @pfn on success, -ve otherwise.
  3866. */
  3867. int follow_pfn(struct vm_area_struct *vma, unsigned long address,
  3868. unsigned long *pfn)
  3869. {
  3870. int ret = -EINVAL;
  3871. spinlock_t *ptl;
  3872. pte_t *ptep;
  3873. if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
  3874. return ret;
  3875. ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
  3876. if (ret)
  3877. return ret;
  3878. *pfn = pte_pfn(*ptep);
  3879. pte_unmap_unlock(ptep, ptl);
  3880. return 0;
  3881. }
  3882. EXPORT_SYMBOL(follow_pfn);
  3883. #ifdef CONFIG_HAVE_IOREMAP_PROT
  3884. int follow_phys(struct vm_area_struct *vma,
  3885. unsigned long address, unsigned int flags,
  3886. unsigned long *prot, resource_size_t *phys)
  3887. {
  3888. int ret = -EINVAL;
  3889. pte_t *ptep, pte;
  3890. spinlock_t *ptl;
  3891. if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
  3892. goto out;
  3893. if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
  3894. goto out;
  3895. pte = *ptep;
  3896. if ((flags & FOLL_WRITE) && !pte_write(pte))
  3897. goto unlock;
  3898. *prot = pgprot_val(pte_pgprot(pte));
  3899. *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
  3900. ret = 0;
  3901. unlock:
  3902. pte_unmap_unlock(ptep, ptl);
  3903. out:
  3904. return ret;
  3905. }
  3906. int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
  3907. void *buf, int len, int write)
  3908. {
  3909. resource_size_t phys_addr;
  3910. unsigned long prot = 0;
  3911. void __iomem *maddr;
  3912. int offset = addr & (PAGE_SIZE-1);
  3913. if (follow_phys(vma, addr, write, &prot, &phys_addr))
  3914. return -EINVAL;
  3915. maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
  3916. if (!maddr)
  3917. return -ENOMEM;
  3918. if (write)
  3919. memcpy_toio(maddr + offset, buf, len);
  3920. else
  3921. memcpy_fromio(buf, maddr + offset, len);
  3922. iounmap(maddr);
  3923. return len;
  3924. }
  3925. EXPORT_SYMBOL_GPL(generic_access_phys);
  3926. #endif
  3927. /*
  3928. * Access another process' address space as given in mm. If non-NULL, use the
  3929. * given task for page fault accounting.
  3930. */
  3931. int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  3932. unsigned long addr, void *buf, int len, unsigned int gup_flags)
  3933. {
  3934. struct vm_area_struct *vma;
  3935. void *old_buf = buf;
  3936. int write = gup_flags & FOLL_WRITE;
  3937. down_read(&mm->mmap_sem);
  3938. /* ignore errors, just check how much was successfully transferred */
  3939. while (len) {
  3940. int bytes, ret, offset;
  3941. void *maddr;
  3942. struct page *page = NULL;
  3943. ret = get_user_pages_remote(tsk, mm, addr, 1,
  3944. gup_flags, &page, &vma, NULL);
  3945. if (ret <= 0) {
  3946. #ifndef CONFIG_HAVE_IOREMAP_PROT
  3947. break;
  3948. #else
  3949. /*
  3950. * Check if this is a VM_IO | VM_PFNMAP VMA, which
  3951. * we can access using slightly different code.
  3952. */
  3953. vma = find_vma(mm, addr);
  3954. if (!vma || vma->vm_start > addr)
  3955. break;
  3956. if (vma->vm_ops && vma->vm_ops->access)
  3957. ret = vma->vm_ops->access(vma, addr, buf,
  3958. len, write);
  3959. if (ret <= 0)
  3960. break;
  3961. bytes = ret;
  3962. #endif
  3963. } else {
  3964. bytes = len;
  3965. offset = addr & (PAGE_SIZE-1);
  3966. if (bytes > PAGE_SIZE-offset)
  3967. bytes = PAGE_SIZE-offset;
  3968. maddr = kmap(page);
  3969. if (write) {
  3970. copy_to_user_page(vma, page, addr,
  3971. maddr + offset, buf, bytes);
  3972. set_page_dirty_lock(page);
  3973. } else {
  3974. copy_from_user_page(vma, page, addr,
  3975. buf, maddr + offset, bytes);
  3976. }
  3977. kunmap(page);
  3978. put_page(page);
  3979. }
  3980. len -= bytes;
  3981. buf += bytes;
  3982. addr += bytes;
  3983. }
  3984. up_read(&mm->mmap_sem);
  3985. return buf - old_buf;
  3986. }
  3987. /**
  3988. * access_remote_vm - access another process' address space
  3989. * @mm: the mm_struct of the target address space
  3990. * @addr: start address to access
  3991. * @buf: source or destination buffer
  3992. * @len: number of bytes to transfer
  3993. * @gup_flags: flags modifying lookup behaviour
  3994. *
  3995. * The caller must hold a reference on @mm.
  3996. */
  3997. int access_remote_vm(struct mm_struct *mm, unsigned long addr,
  3998. void *buf, int len, unsigned int gup_flags)
  3999. {
  4000. return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
  4001. }
  4002. /*
  4003. * Access another process' address space.
4004. * Source/target buffer must be in kernel space.
4005. * Do not walk the page table directly; use get_user_pages().
  4006. */
  4007. int access_process_vm(struct task_struct *tsk, unsigned long addr,
  4008. void *buf, int len, unsigned int gup_flags)
  4009. {
  4010. struct mm_struct *mm;
  4011. int ret;
  4012. mm = get_task_mm(tsk);
  4013. if (!mm)
  4014. return 0;
  4015. ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
  4016. mmput(mm);
  4017. return ret;
  4018. }
  4019. EXPORT_SYMBOL_GPL(access_process_vm);
  4020. /*
  4021. * Print the name of a VMA.
  4022. */
  4023. void print_vma_addr(char *prefix, unsigned long ip)
  4024. {
  4025. struct mm_struct *mm = current->mm;
  4026. struct vm_area_struct *vma;
  4027. /*
  4028. * we might be running from an atomic context so we cannot sleep
  4029. */
  4030. if (!down_read_trylock(&mm->mmap_sem))
  4031. return;
  4032. vma = find_vma(mm, ip);
  4033. if (vma && vma->vm_file) {
  4034. struct file *f = vma->vm_file;
  4035. char *buf = (char *)__get_free_page(GFP_NOWAIT);
  4036. if (buf) {
  4037. char *p;
  4038. p = file_path(f, buf, PAGE_SIZE);
  4039. if (IS_ERR(p))
  4040. p = "?";
  4041. printk("%s%s[%lx+%lx]", prefix, kbasename(p),
  4042. vma->vm_start,
  4043. vma->vm_end - vma->vm_start);
  4044. free_page((unsigned long)buf);
  4045. }
  4046. }
  4047. up_read(&mm->mmap_sem);
  4048. }
  4049. #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
  4050. void __might_fault(const char *file, int line)
  4051. {
  4052. /*
  4053. * Some code (nfs/sunrpc) uses socket ops on kernel memory while
  4054. * holding the mmap_sem, this is safe because kernel memory doesn't
  4055. * get paged out, therefore we'll never actually fault, and the
  4056. * below annotations will generate false positives.
  4057. */
  4058. if (uaccess_kernel())
  4059. return;
  4060. if (pagefault_disabled())
  4061. return;
  4062. __might_sleep(file, line, 0);
  4063. #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
  4064. if (current->mm)
  4065. might_lock_read(&current->mm->mmap_sem);
  4066. #endif
  4067. }
  4068. EXPORT_SYMBOL(__might_fault);
  4069. #endif
  4070. #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
  4071. /*
  4072. * Process all subpages of the specified huge page with the specified
  4073. * operation. The target subpage will be processed last to keep its
  4074. * cache lines hot.
  4075. */
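/*
 * Worked example (illustrative): with pages_per_huge_page == 8 and the
 * target at index 2, the subpages are processed in the order
 * 7, 6, 5, 4, 0, 3, 1, 2 - first the half not containing the target, then
 * pairs converging on the target so that it is touched last and its cache
 * lines stay hot.
 */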
  4076. static inline void process_huge_page(
  4077. unsigned long addr_hint, unsigned int pages_per_huge_page,
  4078. void (*process_subpage)(unsigned long addr, int idx, void *arg),
  4079. void *arg)
  4080. {
  4081. int i, n, base, l;
  4082. unsigned long addr = addr_hint &
  4083. ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
  4084. /* Process target subpage last to keep its cache lines hot */
  4085. might_sleep();
  4086. n = (addr_hint - addr) / PAGE_SIZE;
  4087. if (2 * n <= pages_per_huge_page) {
  4088. /* If target subpage in first half of huge page */
  4089. base = 0;
  4090. l = n;
  4091. /* Process subpages at the end of huge page */
  4092. for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
  4093. cond_resched();
  4094. process_subpage(addr + i * PAGE_SIZE, i, arg);
  4095. }
  4096. } else {
  4097. /* If target subpage in second half of huge page */
  4098. base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
  4099. l = pages_per_huge_page - n;
4100. /* Process subpages at the beginning of huge page */
  4101. for (i = 0; i < base; i++) {
  4102. cond_resched();
  4103. process_subpage(addr + i * PAGE_SIZE, i, arg);
  4104. }
  4105. }
  4106. /*
  4107. * Process remaining subpages in left-right-left-right pattern
  4108. * towards the target subpage
  4109. */
  4110. for (i = 0; i < l; i++) {
  4111. int left_idx = base + i;
  4112. int right_idx = base + 2 * l - 1 - i;
  4113. cond_resched();
  4114. process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
  4115. cond_resched();
  4116. process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
  4117. }
  4118. }
static void clear_gigantic_page(struct page *page,
				unsigned long addr,
				unsigned int pages_per_huge_page)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < pages_per_huge_page;
	     i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}

static void clear_subpage(unsigned long addr, int idx, void *arg)
{
	struct page *page = arg;

	clear_user_highpage(page + idx, addr);
}
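
/*
 * Clear a huge page, processing the subpage at addr_hint last so its
 * cache lines stay hot.  Pages larger than MAX_ORDER_NR_PAGES subpages
 * fall back to a simple linear clear via clear_gigantic_page().
 */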
void clear_huge_page(struct page *page,
		     unsigned long addr_hint, unsigned int pages_per_huge_page)
{
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, addr, pages_per_huge_page);
		return;
	}

	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
}
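
/*
 * Copy a gigantic page subpage by subpage, again walking the mem_map
 * with mem_map_next() in case the struct pages are not virtually
 * contiguous.
 */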
static void copy_user_gigantic_page(struct page *dst, struct page *src,
				    unsigned long addr,
				    struct vm_area_struct *vma,
				    unsigned int pages_per_huge_page)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page; ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}
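
/* Bundles the arguments that copy_subpage() needs from copy_user_huge_page() */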
struct copy_subpage_arg {
	struct page *dst;
	struct page *src;
	struct vm_area_struct *vma;
};

static void copy_subpage(unsigned long addr, int idx, void *arg)
{
	struct copy_subpage_arg *copy_arg = arg;

	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
			   addr, copy_arg->vma);
}
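
/*
 * Copy a huge page, processing the subpage at addr_hint last to keep
 * its cache lines hot, in the same way clear_huge_page() does.
 * Gigantic pages take the linear copy_user_gigantic_page() path.
 */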
void copy_user_huge_page(struct page *dst, struct page *src,
			 unsigned long addr_hint, struct vm_area_struct *vma,
			 unsigned int pages_per_huge_page)
{
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
	struct copy_subpage_arg arg = {
		.dst = dst,
		.src = src,
		.vma = vma,
	};

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		copy_user_gigantic_page(dst, src, addr, vma,
					pages_per_huge_page);
		return;
	}

	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
}
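
/*
 * Copy a huge page's worth of data from user space into dst_page.
 * Returns the number of bytes that could NOT be copied (0 on success).
 * With allow_pagefault the copy may sleep to fault pages in (kmap);
 * otherwise it runs under an atomic mapping (kmap_atomic) where faults
 * are disabled.  In either case the loop stops at the first partial
 * copy.
 */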
long copy_huge_page_from_user(struct page *dst_page,
			      const void __user *usr_src,
			      unsigned int pages_per_huge_page,
			      bool allow_pagefault)
{
	void *src = (void *)usr_src;
	void *page_kaddr;
	unsigned long i, rc = 0;
	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;

	for (i = 0; i < pages_per_huge_page; i++) {
		if (allow_pagefault)
			page_kaddr = kmap(dst_page + i);
		else
			page_kaddr = kmap_atomic(dst_page + i);
		rc = copy_from_user(page_kaddr,
				(const void __user *)(src + i * PAGE_SIZE),
				PAGE_SIZE);
		if (allow_pagefault)
			kunmap(dst_page + i);
		else
			kunmap_atomic(page_kaddr);

		ret_val -= (PAGE_SIZE - rc);
		if (rc)
			break;

		cond_resched();
	}
	return ret_val;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
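/*
 * When split page table locks are in use but spinlock_t is too large
 * to be embedded directly in struct page (ALLOC_SPLIT_PTLOCKS), the
 * locks are allocated separately from this slab cache and struct page
 * only carries a pointer to its lock.
 */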
static struct kmem_cache *page_ptl_cachep;

void __init ptlock_cache_init(void)
{
	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
			SLAB_PANIC, NULL);
}

bool ptlock_alloc(struct page *page)
{
	spinlock_t *ptl;

	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
	if (!ptl)
		return false;
	page->ptl = ptl;
	return true;
}

void ptlock_free(struct page *page)
{
	kmem_cache_free(page_ptl_cachep, page->ptl);
}
#endif