/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in:
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in:
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 * At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 * Modified the slab allocator to be node aware on NUMA systems.
 * Each node has its own list of partial, free and full slabs.
 * All object allocations for a node occur from node specific slab lists.
 */
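/*
 * Illustrative sketch (hypothetical "struct foo", not part of the code
 * below): typical use of the cache API this file implements.  A cache is
 * created once, objects cycle through alloc/free many times, and the
 * constructor only runs when a fresh slab is populated.  Objects must be
 * returned in their initialized state, and no allocation may race with
 * kmem_cache_destroy():
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */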
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
#include <linux/kmemcheck.h>
#include <linux/memory.h>
#include <linux/prefetch.h>

#include <net/sock.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include <trace/events/kmem.h>

#include "internal.h"
#include "slab.h"
/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define DEBUG		1
#define STATS		1
#define FORCED_DEBUG	1
#else
#define DEBUG		0
#define STATS		0
#define FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define BYTES_PER_WORD		sizeof(void *)
#define REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
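/*
 * Worked numbers, assuming a 4K page: PAGE_SIZE >> BITS_PER_BYTE is
 * 4096 >> 8 = 16.  If SLAB_OBJ_MIN_SIZE is at least 16, an order-0 slab
 * holds at most 4096 / 16 = 256 objects, so a one-byte freelist index
 * suffices and SLAB_OBJ_MAX_NUM evaluates to (1 << 8) - 1 = 255 (the
 * order calculation elsewhere caps the object count at this value).
 * Otherwise a two-byte index is used, allowing up to 65535 objects.
 */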
/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};
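/*
 * Sketch of the LIFO discipline (this mirrors what the fast paths later
 * in this file do): a free pushes at the top, an alloc pops the most
 * recently freed - and therefore likely cache-hot - object:
 *
 *	ac->entry[ac->avail++] = objp;		(free fast path)
 *	objp = ac->entry[--ac->avail];		(alloc fast path)
 */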
struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define CACHE_CACHE 0
#define SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->total_slabs = 0;
	parent->free_slabs = 0;
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	(0x40000000UL)
#define CFLGS_OFF_SLAB		(0x80000000UL)
#define OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define OFF_SLAB(x)		((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)
#if STATS
#define STATS_INC_ACTIVE(x)	((x)->num_active++)
#define STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define STATS_INC_GROWN(x)	((x)->grown++)
#define STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define STATS_INC_ERR(x)	((x)->errors++)
#define STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)	((x)->node_overflow++)
#define STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define STATS_INC_ACTIVE(x)	do { } while (0)
#define STATS_DEC_ACTIVE(x)	do { } while (0)
#define STATS_INC_ALLOCED(x)	do { } while (0)
#define STATS_INC_GROWN(x)	do { } while (0)
#define STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define STATS_SET_HIGH(x)	do { } while (0)
#define STATS_INC_ERR(x)	do { } while (0)
#define STATS_INC_NODEALLOCS(x)	do { } while (0)
#define STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)	do { } while (0)
#define STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif
#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 *		the end of an object is aligned with the end of the real
 *		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 *		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2 * BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1 * BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
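/*
 * Rough worked example, assuming 64-bit and SLAB_RED_ZONE |
 * SLAB_STORE_USER: the stored layout is approximately
 *
 *	[padding][redzone1][object][redzone2, REDZONE_ALIGN'ed][caller]
 *
 * so both redzone words can be verified on free, and the address of the
 * last caller sits in the final word of the allocation.
 */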
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long *)(objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *)(objp + cachep->size -
				      sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif
#ifdef CONFIG_DEBUG_SLAB_LEAK

static inline bool is_store_user_clean(struct kmem_cache *cachep)
{
	return atomic_read(&cachep->store_user_clean) == 1;
}

static inline void set_store_user_clean(struct kmem_cache *cachep)
{
	atomic_set(&cachep->store_user_clean, 1);
}

static inline void set_store_user_dirty(struct kmem_cache *cachep)
{
	if (is_store_user_clean(cachep))
		atomic_set(&cachep->store_user_clean, 0);
}

#else
static inline void set_store_user_dirty(struct kmem_cache *cachep) {}

#endif
/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define SLAB_MAX_ORDER_HI	1
#define SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
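/*
 * Example: with cache->size == 192, cache->reciprocal_buffer_size holds
 * reciprocal_value(192), and reciprocal_divide() computes offset / 192
 * with a multiply and a shift instead of a hardware divide; the object
 * at page->s_mem + 960 therefore maps to index 5.
 */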
#define BOOT_CPUCACHE_ENTRIES	1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
		unsigned long flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}
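/*
 * Worked example (4K page, order 0, 256-byte objects, freelist kept on
 * the slab): each object costs 256 + sizeof(freelist_idx_t) = 257 bytes,
 * so num = 4096 / 257 = 15 and *left_over = 4096 - 15 * 257 = 241 bytes,
 * which slab colouring can later consume.
 */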
#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	pr_err("slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line.
 */
static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);
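/*
 * These two handlers correspond to boot command line usage such as:
 *
 *	noaliencache slab_max_order=2
 *
 * the former disabling alien caches (useful on fake-NUMA setups), the
 * latter allowing slabs of up to order 2, i.e. four contiguous pages.
 */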
#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
						    node_online_map);
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node_in(node, node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	if (reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}
static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}
static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct page *page, void *objp)
{
	struct kmem_cache_node *n;
	int page_node;
	LIST_HEAD(list);

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);
	spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
	       sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
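/*
 * Example: with from->avail == 10, max == 16 and four free slots in the
 * destination (to->limit - to->avail == 4), nr is 4 and the four most
 * recently freed pointers, from->entry[6..9], move over; the remaining
 * six source entries keep their LIFO order.
 */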
#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return NULL;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags & ~__GFP_NOFAIL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	/* Only initialize on success; kmalloc_node() may return NULL. */
	if (alc) {
		init_arraycache(&alc->ac, entries, batch);
		spin_lock_init(&alc->lock);
	}
	return alc;
}
static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
		kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}
static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		ac->entry[ac->avail++] = objp;
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the
	 * array cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not reclaim or
 * warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
}
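/*
 * Example: gfp_exact_node(GFP_KERNEL) yields
 * (GFP_KERNEL | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL),
 * i.e. "this node or fail, silently and without reclaim", which is what
 * the opportunistic node-local allocation paths want.
 */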
#endif

static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
{
	struct kmem_cache_node *n;

	/*
	 * Set up the kmem_cache_node for cpu before we can
	 * begin anything. Make sure some other cpu on this
	 * node has not already allocated this
	 */
	n = get_node(cachep, node);
	if (n) {
		spin_lock_irq(&n->list_lock);
		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
				cachep->num;
		spin_unlock_irq(&n->list_lock);

		return 0;
	}

	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
	if (!n)
		return -ENOMEM;

	kmem_cache_node_init(n);
	n->next_reap = jiffies + REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

	n->free_limit =
		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;

	/*
	 * The kmem_cache_nodes don't come and go as CPUs
	 * come and go.  slab_mutex is sufficient
	 * protection here.
	 */
	cachep->node[node] = n;

	return 0;
}
#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
/*
 * Allocates and initializes the kmem_cache_node for a node on each slab cache,
 * used for either memory or cpu hotplug.  If memory is being hot-added, the
 * kmem_cache_node will be allocated off-node since memory is not yet online
 * for the new node.  When hotplugging memory or a cpu, existing nodes are not
 * replaced if already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	int ret;
	struct kmem_cache *cachep;

	list_for_each_entry(cachep, &slab_caches, list) {
		ret = init_cache_node(cachep, node, GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
#endif
static int setup_kmem_cache_node(struct kmem_cache *cachep,
				int node, gfp_t gfp, bool force_change)
{
	int ret = -ENOMEM;
	struct kmem_cache_node *n;
	struct array_cache *old_shared = NULL;
	struct array_cache *new_shared = NULL;
	struct alien_cache **new_alien = NULL;
	LIST_HEAD(list);

	if (use_alien_caches) {
		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
		if (!new_alien)
			goto fail;
	}

	if (cachep->shared) {
		new_shared = alloc_arraycache(node,
			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
		if (!new_shared)
			goto fail;
	}

	ret = init_cache_node(cachep, node, gfp);
	if (ret)
		goto fail;

	n = get_node(cachep, node);
	spin_lock_irq(&n->list_lock);
	if (n->shared && force_change) {
		free_block(cachep, n->shared->entry,
				n->shared->avail, node, &list);
		n->shared->avail = 0;
	}

	if (!n->shared || force_change) {
		old_shared = n->shared;
		n->shared = new_shared;
		new_shared = NULL;
	}

	if (!n->alien) {
		n->alien = new_alien;
		new_alien = NULL;
	}

	spin_unlock_irq(&n->list_lock);
	slabs_destroy(cachep, &list);

	/*
	 * To protect lockless access to n->shared during irq disabled context.
	 * If n->shared isn't NULL in irq disabled context, accessing to it is
	 * guaranteed to be valid until irq is re-enabled, because it will be
	 * freed after synchronize_sched().
	 */
	if (old_shared && force_change)
		synchronize_sched();

fail:
	kfree(old_shared);
	kfree(new_shared);
	free_alien_cache(new_alien);

	return ret;
}
#ifdef CONFIG_SMP

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct alien_cache **alien;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		if (!n)
			continue;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		if (nc) {
			free_block(cachep, nc->entry, nc->avail, node, &list);
			nc->avail = 0;
		}

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_slab;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node, &list);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}

free_slab:
		slabs_destroy(cachep, &list);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs, now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = get_node(cachep, node);
		if (!n)
			continue;
		drain_freelist(cachep, n, INT_MAX);
	}
}
static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
		if (err)
			goto bad;
	}

	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}
int slab_prepare_cpu(unsigned int cpu)
{
	int err;

	mutex_lock(&slab_mutex);
	err = cpuup_prepare(cpu);
	mutex_unlock(&slab_mutex);
	return err;
}

/*
 * This is called for a failed online attempt and for a successful
 * offline.
 *
 * Even if all the cpus of a node are down, we don't free the
 * kmem_cache_node of any cache. This is to avoid a race between cpu_down
 * and a kmalloc allocation from another cpu for memory from the node of
 * the cpu going down. The kmem_cache_node structure is usually allocated
 * from kmem_cache_create() and gets destroyed at kmem_cache_destroy().
 */
int slab_dead_cpu(unsigned int cpu)
{
	mutex_lock(&slab_mutex);
	cpuup_canceled(cpu);
	mutex_unlock(&slab_mutex);
	return 0;
}
#endif

static int slab_online_cpu(unsigned int cpu)
{
	start_cpu_timer(cpu);
	return 0;
}

static int slab_offline_cpu(unsigned int cpu)
{
	/*
	 * Shutdown cache reaper. Note that the slab_mutex is held so
	 * that if cache_reap() is invoked it cannot do anything
	 * expensive but will only modify reap_work and reschedule the
	 * timer.
	 */
	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
	/* Now the cache_reaper is guaranteed to be not running. */
	per_cpu(slab_reap_work, cpu).work.func = NULL;
	return 0;
}

#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
 * Must hold slab_mutex.
 */
static int __meminit drain_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	int ret = 0;

	list_for_each_entry(cachep, &slab_caches, list) {
		struct kmem_cache_node *n;

		n = get_node(cachep, node);
		if (!n)
			continue;

		drain_freelist(cachep, n, INT_MAX);

		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
		mutex_lock(&slab_mutex);
		ret = init_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_GOING_OFFLINE:
		mutex_lock(&slab_mutex);
		ret = drain_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
	return notifier_from_errno(ret);
}
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
/*
 * swap the static kmem_cache_node with kmalloced memory
 */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
				int nodeid)
{
	struct kmem_cache_node *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_cache_node));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->node[nodeid] = ptr;
}

/*
 * For setting up all the kmem_cache_nodes of a cache whose buffer_size is
 * the same as the size of kmem_cache_node.
 */
static void __init set_up_node(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->node[node] = &init_kmem_cache_node[index + node];
		cachep->node[node]->next_reap = jiffies +
		    REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
	}
}
/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
					sizeof(struct rcu_head));
	kmem_cache = &kmem_cache_boot;

	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++)
		kmem_cache_node_init(&init_kmem_cache_node[i]);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
	 */
	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
		slab_max_order = SLAB_MAX_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the kmem_cache */

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, node) +
				  nr_node_ids * sizeof(struct kmem_cache_node *),
				  SLAB_HWCACHE_ALIGN);
	list_add(&kmem_cache->list, &slab_caches);
	slab_state = PARTIAL;

	/*
	 * Initialize the caches that provide memory for the kmem_cache_node
	 * structures first.  Without this, further allocations will bug.
	 */
	kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
	slab_state = PARTIAL_NODE;
	setup_kmalloc_cache_index_table();

	slab_early_init = 0;

	/* 5) Replace the bootstrap kmem_cache_node */
	{
		int nid;

		for_each_online_node(nid) {
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);

			init_list(kmalloc_caches[INDEX_NODE],
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
		}
	}

	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	slab_state = UP;

	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
	mutex_unlock(&slab_mutex);

	/* Done! */
	slab_state = FULL;

#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
	 * the per-node structures.
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

	/*
	 * The reap timers are started later, with a module init call: that part
	 * of the kernel is not yet operational.
	 */
}

static int __init cpucache_init(void)
{
	int ret;

	/*
	 * Register the timers that return unneeded pages to the page allocator.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
				slab_online_cpu, slab_offline_cpu);
	WARN_ON(ret < 0);

	/* Done! */
	slab_state = FULL;
	return 0;
}
__initcall(cpucache_init);

static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
#if DEBUG
	struct kmem_cache_node *n;
	unsigned long flags;
	int node;
	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
		return;

	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
		nodeid, gfpflags, &gfpflags);
	pr_warn("  cache: %s, object size: %d, order: %d\n",
		cachep->name, cachep->size, cachep->gfporder);

	for_each_kmem_cache_node(cachep, node, n) {
		unsigned long total_slabs, free_slabs, free_objs;

		spin_lock_irqsave(&n->list_lock, flags);
		total_slabs = n->total_slabs;
		free_slabs = n->free_slabs;
		free_objs = n->free_objects;
		spin_unlock_irqrestore(&n->list_lock, flags);

		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
			node, total_slabs - free_slabs, total_slabs,
			(total_slabs * cachep->num) - free_objs,
			total_slabs * cachep->num);
	}
#endif
}

/*
 * Interface to system's page allocator. No need to hold the
 * kmem_cache_node ->list_lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
				  int nodeid)
{
	struct page *page;
	int nr_pages;

	flags |= cachep->allocflags;
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;

	page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
	if (!page) {
		slab_out_of_memory(cachep, flags, nodeid);
		return NULL;
	}

	if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
		__free_pages(page, cachep->gfporder);
		return NULL;
	}

	nr_pages = (1 << cachep->gfporder);
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		add_zone_page_state(page_zone(page),
			NR_SLAB_RECLAIMABLE, nr_pages);
	else
		add_zone_page_state(page_zone(page),
			NR_SLAB_UNRECLAIMABLE, nr_pages);

	__SetPageSlab(page);
	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
		SetPageSlabPfmemalloc(page);

	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

		if (cachep->ctor)
			kmemcheck_mark_uninitialized_pages(page, nr_pages);
		else
			kmemcheck_mark_unallocated_pages(page, nr_pages);
	}

	return page;
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
	int order = cachep->gfporder;
	unsigned long nr_freed = (1 << order);

	kmemcheck_free_shadow(page, order);

	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		sub_zone_page_state(page_zone(page),
				NR_SLAB_RECLAIMABLE, nr_freed);
	else
		sub_zone_page_state(page_zone(page),
				NR_SLAB_UNRECLAIMABLE, nr_freed);

	BUG_ON(!PageSlab(page));
	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);
	page_mapcount_reset(page);
	page->mapping = NULL;

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
	memcg_uncharge_slab(page, order, cachep);
	__free_pages(page, order);
}
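
/*
 * RCU callback used when a cache is SLAB_DESTROY_BY_RCU: the slab page is
 * only handed back to the page allocator after a grace period.
 */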
static void kmem_rcu_free(struct rcu_head *head)
{
	struct kmem_cache *cachep;
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	cachep = page->slab_cache;

	kmem_freepages(cachep, page);
}

#if DEBUG
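/*
 * True if this cache uses debug pagealloc: an off-slab freelist and an
 * object size that is a multiple of PAGE_SIZE, so whole pages can be
 * mapped and unmapped around each allocation and free.
 */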
static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
{
	if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
		(cachep->size % PAGE_SIZE) == 0)
		return true;

	return false;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
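/*
 * Record an allocation/free footprint inside the (poisoned) object:
 * a marker, the caller, the CPU and as much of the current backtrace
 * as fits, terminated by a second marker.
 */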
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
			    unsigned long caller)
{
	int size = cachep->object_size;

	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];

	if (size < 5 * sizeof(unsigned long))
		return;

	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
				*addr++ = svalue;
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}
	}
	*addr++ = 0x87654321;
}

static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
			    int map, unsigned long caller)
{
	if (!is_debug_pagealloc_cache(cachep))
		return;

	if (caller)
		store_stackinfo(cachep, objp, caller);

	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}

#else
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
				   int map, unsigned long caller) {}

#endif
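
/*
 * Fill the object with a poison byte pattern and terminate it with
 * POISON_END so later corruption can be detected byte by byte.
 */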
static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = cachep->object_size;
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}
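
/*
 * Hex-dump one 16-byte line of a corrupted object and, when exactly one
 * byte is bad, point out a likely single-bit memory error.
 */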
static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	pr_err("%03x: ", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			pr_err("Single bit error detected. Probably bad RAM.\n");
#ifdef CONFIG_X86
			pr_err("Run memtest86+ or a similar memory test tool.\n");
#else
			pr_err("Run a memory test tool.\n");
#endif
		}
	}
}
#endif

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		pr_err("Redzone: 0x%llx/0x%llx\n",
		       *dbg_redzone1(cachep, objp),
		       *dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER) {
		pr_err("Last user: [<%p>](%pSR)\n",
		       *dbg_userword(cachep, objp),
		       *dbg_userword(cachep, objp));
	}
	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	if (is_debug_pagealloc_cache(cachep))
		return;

	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
				pr_err("Slab corruption (%s): %s start=%p, len=%d\n",
				       print_tainted(), cachep->name,
				       realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct page *page = virt_to_head_page(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, page, objp);
		if (objnr) {
			objp = index_to_obj(cachep, page, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			pr_err("Prev obj: start=%p, len=%d\n", realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, page, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			pr_err("Next obj: start=%p, len=%d\n", realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
				    struct page *page)
{
	int i;

	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
		poison_obj(cachep, page->freelist - obj_offset(cachep),
			   POISON_FREE);
	}

	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, page, i);

		if (cachep->flags & SLAB_POISON) {
			check_poison_obj(cachep, objp);
			slab_kernel_map(cachep, objp, 1, 0);
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
				    struct page *page)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab page, and release the mem back to the system.
 * Before calling, the slab page must have been unlinked from the cache. The
 * kmem_cache_node ->list_lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
	void *freelist;

	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
		call_rcu(&page->rcu_head, kmem_rcu_free);
	else
		kmem_freepages(cachep, page);

	/*
	 * From now on, we don't use freelist
	 * although actual page can be freed in rcu context
	 */
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->freelist_cache, freelist);
}
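
/*
 * Destroy every slab page on @list. The pages were already unlinked from
 * the cache, so no list_lock is needed here.
 */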
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
	struct page *page, *n;

	list_for_each_entry_safe(page, n, list, lru) {
		list_del(&page->lru);
		slab_destroy(cachep, page);
	}
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
				size_t size, unsigned long flags)
{
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		num = cache_estimate(gfporder, size, flags, &remainder);
		if (!num)
			continue;

		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
		if (num > SLAB_OBJ_MAX_NUM)
			break;

		if (flags & CFLGS_OFF_SLAB) {
			struct kmem_cache *freelist_cache;
			size_t freelist_size;

			freelist_size = num * sizeof(freelist_idx_t);
			freelist_cache = kmalloc_slab(freelist_size, 0u);
			if (!freelist_cache)
				continue;

			/*
			 * Needed to avoid possible looping condition
			 * in cache_grow_begin()
			 */
			if (OFF_SLAB(freelist_cache))
				continue;

			/* check if off slab has enough benefit */
			if (freelist_cache->size > cachep->size / 2)
				continue;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_max_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}
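
/*
 * Allocate and initialize the percpu array_cache structures that front
 * each CPU's view of the cache.
 */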
static struct array_cache __percpu *alloc_kmem_cache_cpus(
		struct kmem_cache *cachep, int entries, int batchcount)
{
	int cpu;
	size_t size;
	struct array_cache __percpu *cpu_cache;

	size = sizeof(void *) * entries + sizeof(struct array_cache);
	cpu_cache = __alloc_percpu(size, sizeof(void *));

	if (!cpu_cache)
		return NULL;

	for_each_possible_cpu(cpu) {
		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
				entries, batchcount);
	}

	return cpu_cache;
}
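
/*
 * Set up the cpu caches and kmem_cache_node structures for a new cache,
 * taking the bootstrap slab_state into account: early caches must borrow
 * the static __init structures until kmalloc is usable.
 */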
static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (slab_state >= FULL)
		return enable_cpucache(cachep, gfp);

	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
	if (!cachep->cpu_cache)
		return 1;

	if (slab_state == DOWN) {
		/* Creation of first cache (kmem_cache). */
		set_up_node(kmem_cache, CACHE_CACHE);
	} else if (slab_state == PARTIAL) {
		/* For kmem_cache_node */
		set_up_node(cachep, SIZE_NODE);
	} else {
		int node;

		for_each_online_node(node) {
			cachep->node[node] = kmalloc_node(
				sizeof(struct kmem_cache_node), gfp, node);
			BUG_ON(!cachep->node[node]);
			kmem_cache_node_init(cachep->node[node]);
		}
	}

	cachep->node[numa_mem_id()]->next_reap =
			jiffies + REAPTIMEOUT_NODE +
			((unsigned long)cachep) % REAPTIMEOUT_NODE;

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
	return 0;
}

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}

struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *cachep;

	cachep = find_mergeable(size, align, flags, name, ctor);
	if (cachep) {
		cachep->refcount++;

		/*
		 * Adjust the object sizes so that we clear
		 * the complete object on kzalloc.
		 */
		cachep->object_size = max_t(int, cachep->object_size, size);
	}
	return cachep;
}
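
/*
 * Try to lay the cache out so that the freelist index array lives inside
 * the last free object of each slab; possible only when neither a
 * constructor nor RCU-deferred destruction would clobber it.
 */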
static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
			size_t size, unsigned long flags)
{
	size_t left;

	cachep->num = 0;

	if (cachep->ctor || flags & SLAB_DESTROY_BY_RCU)
		return false;

	left = calculate_slab_order(cachep, size,
			flags | CFLGS_OBJFREELIST_SLAB);
	if (!cachep->num)
		return false;

	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_off_slab_cache(struct kmem_cache *cachep,
			size_t size, unsigned long flags)
{
	size_t left;

	cachep->num = 0;

	/*
	 * Always use on-slab management when SLAB_NOLEAKTRACE
	 * to avoid recursive calls into kmemleak.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		return false;

	/*
	 * Size is large, assume best to place the slab management obj
	 * off-slab (should allow better packing of objs).
	 */
	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
	if (!cachep->num)
		return false;

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	if (left >= cachep->num * sizeof(freelist_idx_t))
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_on_slab_cache(struct kmem_cache *cachep,
			size_t size, unsigned long flags)
{
	size_t left;

	cachep->num = 0;

	left = calculate_slab_order(cachep, size, flags);
	if (!cachep->num)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

/**
 * __kmem_cache_create - Create a cache.
 * @cachep: cache management descriptor
 * @flags: SLAB flags
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
int
__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
{
	size_t ralign = BYTES_PER_WORD;
	gfp_t gfp;
	int err;
	size_t size = cachep->size;

#if DEBUG
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
	if (!(flags & SLAB_DESTROY_BY_RCU))
		flags |= SLAB_POISON;
#endif
#endif

	/*
	 * Check that size is in terms of words.  This is needed to avoid
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
	if (size & (BYTES_PER_WORD - 1)) {
		size += (BYTES_PER_WORD - 1);
		size &= ~(BYTES_PER_WORD - 1);
	}

	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
		size += REDZONE_ALIGN - 1;
		size &= ~(REDZONE_ALIGN - 1);
	}

	/* 3) caller mandated alignment */
	if (ralign < cachep->align) {
		ralign = cachep->align;
	}
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
	/*
	 * 4) Store it.
	 */
	cachep->align = ralign;
	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
	if (cachep->colour_off < cachep->align)
		cachep->colour_off = cachep->align;

	if (slab_is_available())
		gfp = GFP_KERNEL;
	else
		gfp = GFP_NOWAIT;

#if DEBUG

	/*
	 * Both debugging options require word-alignment which is calculated
	 * into align above.
	 */
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
		cachep->obj_offset += sizeof(unsigned long long);
		size += 2 * sizeof(unsigned long long);
	}
	if (flags & SLAB_STORE_USER) {
		/* user store requires one word storage behind the end of
		 * the real object. But if the second red zone needs to be
		 * aligned to 64 bits, we must allow that much space.
		 */
		if (flags & SLAB_RED_ZONE)
			size += REDZONE_ALIGN;
		else
			size += BYTES_PER_WORD;
	}
#endif

	kasan_cache_create(cachep, &size, &flags);

	size = ALIGN(size, cachep->align);
	/*
	 * We should restrict the number of objects in a slab to implement
	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
	 */
	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);

#if DEBUG
	/*
	 * To activate debug pagealloc, off-slab management is a necessary
	 * requirement. In the early phase of initialization, small sized slabs
	 * don't get initialized yet, so it would not be possible then. We
	 * check size >= 256, which guarantees that all necessary small sized
	 * slabs have been initialized in the current slab initialization
	 * sequence.
	 */
	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
		size >= 256 && cachep->object_size > cache_line_size()) {
		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
			size_t tmp_size = ALIGN(size, PAGE_SIZE);

			if (set_off_slab_cache(cachep, tmp_size, flags)) {
				flags |= CFLGS_OFF_SLAB;
				cachep->obj_offset += tmp_size - size;
				size = tmp_size;
				goto done;
			}
		}
	}
#endif

	if (set_objfreelist_slab_cache(cachep, size, flags)) {
		flags |= CFLGS_OBJFREELIST_SLAB;
		goto done;
	}

	if (set_off_slab_cache(cachep, size, flags)) {
		flags |= CFLGS_OFF_SLAB;
		goto done;
	}

	if (set_on_slab_cache(cachep, size, flags))
		goto done;

	return -E2BIG;

done:
	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
	cachep->flags = flags;
	cachep->allocflags = __GFP_COMP;
	if (flags & SLAB_CACHE_DMA)
		cachep->allocflags |= GFP_DMA;
	cachep->size = size;
	cachep->reciprocal_buffer_size = reciprocal_value(size);

#if DEBUG
	/*
	 * If we're going to use the generic kernel_map_pages()
	 * poisoning, then it's going to smash the contents of
	 * the redzone and userword anyhow, so switch them off.
	 */
	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
		(cachep->flags & SLAB_POISON) &&
		is_debug_pagealloc_cache(cachep))
		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif

	if (OFF_SLAB(cachep)) {
		cachep->freelist_cache =
			kmalloc_slab(cachep->freelist_size, 0u);
	}

	err = setup_cpu_cache(cachep, gfp);
	if (err) {
		__kmem_cache_release(cachep);
		return err;
	}

	return 0;
}

#if DEBUG
static void check_irq_off(void)
{
	BUG_ON(!irqs_disabled());
}

static void check_irq_on(void)
{
	BUG_ON(irqs_disabled());
}

static void check_mutex_acquired(void)
{
	BUG_ON(!mutex_is_locked(&slab_mutex));
}

static void check_spinlock_acquired(struct kmem_cache *cachep)
{
#ifdef CONFIG_SMP
	check_irq_off();
	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
#endif
}

static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
	check_irq_off();
	assert_spin_locked(&get_node(cachep, node)->list_lock);
#endif
}

#else
#define check_irq_off()	do { } while(0)
#define check_irq_on()	do { } while(0)
#define check_mutex_acquired()	do { } while(0)
#define check_spinlock_acquired(x) do { } while(0)
#define check_spinlock_acquired_node(x, y) do { } while(0)
#endif
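
/*
 * Flush some objects out of an array_cache back to the slab lists: a full
 * drain frees everything, a partial drain frees about a fifth of the
 * limit. Caller must hold the node's list_lock.
 */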
static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
				int node, bool free_all, struct list_head *list)
{
	int tofree;

	if (!ac || !ac->avail)
		return;

	tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
	if (tofree > ac->avail)
		tofree = (ac->avail + 1) / 2;

	free_block(cachep, ac->entry, tofree, node, list);
	ac->avail -= tofree;
	memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
}
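
/*
 * Runs on each CPU via on_each_cpu(): push everything in this CPU's
 * array_cache back to the node lists and destroy any slabs that became
 * completely free as a result.
 */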
static void do_drain(void *arg)
{
	struct kmem_cache *cachep = arg;
	struct array_cache *ac;
	int node = numa_mem_id();
	struct kmem_cache_node *n;
	LIST_HEAD(list);

	check_irq_off();
	ac = cpu_cache_get(cachep);
	n = get_node(cachep, node);
	spin_lock(&n->list_lock);
	free_block(cachep, ac->entry, ac->avail, node, &list);
	spin_unlock(&n->list_lock);
	slabs_destroy(cachep, &list);
	ac->avail = 0;
}

static void drain_cpu_caches(struct kmem_cache *cachep)
{
	struct kmem_cache_node *n;
	int node;
	LIST_HEAD(list);

	on_each_cpu(do_drain, cachep, 1);
	check_irq_on();
	for_each_kmem_cache_node(cachep, node, n)
		if (n->alien)
			drain_alien_cache(cachep, n->alien);

	for_each_kmem_cache_node(cachep, node, n) {
		spin_lock_irq(&n->list_lock);
		drain_array_locked(cachep, n->shared, node, true, &list);
		spin_unlock_irq(&n->list_lock);

		slabs_destroy(cachep, &list);
	}
}

/*
 * Remove slabs from the list of free slabs.
 * Specify the number of slabs to drain in tofree.
 *
 * Returns the actual number of slabs released.
 */
static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree)
{
	struct list_head *p;
	int nr_freed;
	struct page *page;

	nr_freed = 0;
	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {

		spin_lock_irq(&n->list_lock);
		p = n->slabs_free.prev;
		if (p == &n->slabs_free) {
			spin_unlock_irq(&n->list_lock);
			goto out;
		}

		page = list_entry(p, struct page, lru);
		list_del(&page->lru);
		n->free_slabs--;
		n->total_slabs--;
		/*
		 * Safe to drop the lock. The slab is no longer linked
		 * to the cache.
		 */
		n->free_objects -= cache->num;
		spin_unlock_irq(&n->list_lock);
		slab_destroy(cache, page);
		nr_freed++;
	}
out:
	return nr_freed;
}

int __kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret = 0;
	int node;
	struct kmem_cache_node *n;

	drain_cpu_caches(cachep);

	check_irq_on();
	for_each_kmem_cache_node(cachep, node, n) {
		drain_freelist(cachep, n, INT_MAX);

		ret += !list_empty(&n->slabs_full) ||
			!list_empty(&n->slabs_partial);
	}
	return (ret ? 1 : 0);
}

int __kmem_cache_shutdown(struct kmem_cache *cachep)
{
	return __kmem_cache_shrink(cachep);
}

void __kmem_cache_release(struct kmem_cache *cachep)
{
	int i;
	struct kmem_cache_node *n;

	cache_random_seq_destroy(cachep);

	free_percpu(cachep->cpu_cache);

	/* NUMA: free the node structures */
	for_each_kmem_cache_node(cachep, i, n) {
		kfree(n->shared);
		free_alien_cache(n->alien);
		kfree(n);
		cachep->node[i] = NULL;
	}
}

/*
 * Get the memory for a slab management obj.
 *
 * For a slab cache when the slab descriptor is off-slab, the
 * slab descriptor can't come from the same cache which is being created,
 * because if it did, that would mean we defer the creation of
 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
 * We would eventually call down to __kmem_cache_create(), which
 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
 * This is a "chicken-and-egg" problem.
 *
 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
 * which are all initialized during kmem_cache_init().
 */
static void *alloc_slabmgmt(struct kmem_cache *cachep,
			    struct page *page, int colour_off,
			    gfp_t local_flags, int nodeid)
{
	void *freelist;
	void *addr = page_address(page);

	page->s_mem = addr + colour_off;
	page->active = 0;

	if (OBJFREELIST_SLAB(cachep))
		freelist = NULL;
	else if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab. */
		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
						 local_flags, nodeid);
		if (!freelist)
			return NULL;
	} else {
		/* We will use last bytes at the slab for freelist */
		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
				cachep->freelist_size;
	}

	return freelist;
}
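
/*
 * The freelist is an array of object indices; these helpers read and
 * write one slot of that array.
 */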
static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
{
	return ((freelist_idx_t *)page->freelist)[idx];
}

static inline void set_free_obj(struct page *page,
					unsigned int idx, freelist_idx_t val)
{
	((freelist_idx_t *)(page->freelist))[idx] = val;
}

static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
{
#if DEBUG
	int i;

	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, page, i);

		if (cachep->flags & SLAB_STORE_USER)
			*dbg_userword(cachep, objp) = NULL;

		if (cachep->flags & SLAB_RED_ZONE) {
			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
		}
		/*
		 * Constructors are not allowed to allocate memory from the same
		 * cache which they are a constructor for.  Otherwise, deadlock.
		 * They must also be threaded.
		 */
		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
			kasan_unpoison_object_data(cachep,
						   objp + obj_offset(cachep));
			cachep->ctor(objp + obj_offset(cachep));
			kasan_poison_object_data(
				cachep, objp + obj_offset(cachep));
		}

		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the end of an object");
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the start of an object");
		}
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON) {
			poison_obj(cachep, objp, POISON_FREE);
			slab_kernel_map(cachep, objp, 0, 0);
		}
	}
#endif
}

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Hold information during a freelist initialization */
union freelist_init_state {
	struct {
		unsigned int pos;
		unsigned int *list;
		unsigned int count;
	};
	struct rnd_state rnd_state;
};

/*
 * Initialize the state based on the randomization method available.
 * return true if the pre-computed list is available, false otherwise.
 */
static bool freelist_state_initialize(union freelist_init_state *state,
				struct kmem_cache *cachep,
				unsigned int count)
{
	bool ret;
	unsigned int rand;

	/* Use best entropy available to define a random shift */
	rand = get_random_int();

	/* Use a random state if the pre-computed list is not available */
	if (!cachep->random_seq) {
		prandom_seed_state(&state->rnd_state, rand);
		ret = false;
	} else {
		state->list = cachep->random_seq;
		state->count = count;
		state->pos = rand % count;
		ret = true;
	}
	return ret;
}

/* Get the next entry on the list and randomize it using a random shift */
static freelist_idx_t next_random_slot(union freelist_init_state *state)
{
	if (state->pos >= state->count)
		state->pos = 0;
	return state->list[state->pos++];
}

/* Swap two freelist entries */
static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
{
	swap(((freelist_idx_t *)page->freelist)[a],
		((freelist_idx_t *)page->freelist)[b]);
}

/*
 * Shuffle the freelist initialization state based on pre-computed lists.
 * return true if the list was successfully shuffled, false otherwise.
 */
static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
{
	unsigned int objfreelist = 0, i, rand, count = cachep->num;
	union freelist_init_state state;
	bool precomputed;

	if (count < 2)
		return false;

	precomputed = freelist_state_initialize(&state, cachep, count);

	/* Take a random entry as the objfreelist */
	if (OBJFREELIST_SLAB(cachep)) {
		if (!precomputed)
			objfreelist = count - 1;
		else
			objfreelist = next_random_slot(&state);
		page->freelist = index_to_obj(cachep, page, objfreelist) +
						obj_offset(cachep);
		count--;
	}

	/*
	 * On early boot, generate the list dynamically.
	 * Later use a pre-computed list for speed.
	 */
	if (!precomputed) {
		for (i = 0; i < count; i++)
			set_free_obj(page, i, i);

		/* Fisher-Yates shuffle */
		for (i = count - 1; i > 0; i--) {
			rand = prandom_u32_state(&state.rnd_state);
			rand %= (i + 1);
			swap_free_obj(page, i, rand);
		}
	} else {
		for (i = 0; i < count; i++)
			set_free_obj(page, i, next_random_slot(&state));
	}

	if (OBJFREELIST_SLAB(cachep))
		set_free_obj(page, cachep->num - 1, objfreelist);

	return true;
}
#else
static inline bool shuffle_freelist(struct kmem_cache *cachep,
				struct page *page)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
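
/*
 * Initialize every object in a freshly grown slab: run constructors,
 * set up KASAN metadata and build the (optionally shuffled) freelist.
 */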
static void cache_init_objs(struct kmem_cache *cachep,
			    struct page *page)
{
	int i;
	void *objp;
	bool shuffled;

	cache_init_objs_debug(cachep, page);

	/* Try to randomize the freelist if enabled */
	shuffled = shuffle_freelist(cachep, page);

	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
						obj_offset(cachep);
	}

	for (i = 0; i < cachep->num; i++) {
		objp = index_to_obj(cachep, page, i);
		kasan_init_slab_obj(cachep, objp);

		/* constructor could break poison info */
		if (DEBUG == 0 && cachep->ctor) {
			kasan_unpoison_object_data(cachep, objp);
			cachep->ctor(objp);
			kasan_poison_object_data(cachep, objp);
		}

		if (!shuffled)
			set_free_obj(page, i, i);
	}
}
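
/*
 * Pop the next free object off a slab's freelist (slab_get_obj) and push
 * a freed object back onto it (slab_put_obj), adjusting page->active
 * accordingly.
 */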
static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
{
	void *objp;

	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
	page->active++;

#if DEBUG
	if (cachep->flags & SLAB_STORE_USER)
		set_store_user_dirty(cachep);
#endif

	return objp;
}

static void slab_put_obj(struct kmem_cache *cachep,
			struct page *page, void *objp)
{
	unsigned int objnr = obj_to_index(cachep, page, objp);
#if DEBUG
	unsigned int i;

	/* Verify double free bug */
	for (i = page->active; i < cachep->num; i++) {
		if (get_free_obj(page, i) == objnr) {
			pr_err("slab: double free detected in cache '%s', objp %p\n",
			       cachep->name, objp);
			BUG();
		}
	}
#endif
	page->active--;
	if (!page->freelist)
		page->freelist = objp + obj_offset(cachep);

	set_free_obj(page, page->active, objnr);
}

/*
 * Map pages beginning at addr to the given cache and slab. This is required
 * for the slab allocator to be able to lookup the cache and slab of a
 * virtual address for kfree, ksize, and slab debugging.
 */
static void slab_map_pages(struct kmem_cache *cache, struct page *page,
			   void *freelist)
{
	page->slab_cache = cache;
	page->freelist = freelist;
}

/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
static struct page *cache_grow_begin(struct kmem_cache *cachep,
				gfp_t flags, int nodeid)
{
	void *freelist;
	size_t offset;
	gfp_t local_flags;
	int page_node;
	struct kmem_cache_node *n;
	struct page *page;

	/*
	 * Be lazy and only check for valid flags here, keeping it out of the
	 * critical path in kmem_cache_alloc().
	 */
	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
		flags &= ~GFP_SLAB_BUG_MASK;
		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
				invalid_mask, &invalid_mask, flags, &flags);
		dump_stack();
	}
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

	check_irq_off();
	if (gfpflags_allow_blocking(local_flags))
		local_irq_enable();

	/*
	 * Get mem for the objs.  Attempt to allocate a physical page from
	 * 'nodeid'.
	 */
	page = kmem_getpages(cachep, local_flags, nodeid);
	if (!page)
		goto failed;

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	/* Get colour for the slab, and calculate the next value. */
	n->colour_next++;
	if (n->colour_next >= cachep->colour)
		n->colour_next = 0;

	offset = n->colour_next;
	if (offset >= cachep->colour)
		offset = 0;

	offset *= cachep->colour_off;

	/* Get slab management. */
	freelist = alloc_slabmgmt(cachep, page, offset,
			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
	if (OFF_SLAB(cachep) && !freelist)
		goto opps1;

	slab_map_pages(cachep, page, freelist);

	kasan_poison_slab(page);
	cache_init_objs(cachep, page);

	if (gfpflags_allow_blocking(local_flags))
		local_irq_disable();

	return page;

opps1:
	kmem_freepages(cachep, page);
failed:
	if (gfpflags_allow_blocking(local_flags))
		local_irq_disable();
	return NULL;
}
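
/*
 * Second half of growing a cache: link the freshly initialized slab into
 * the appropriate per-node list and update the free counters.
 */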
static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
{
	struct kmem_cache_node *n;
	void *list = NULL;

	check_irq_off();

	if (!page)
		return;

	INIT_LIST_HEAD(&page->lru);
	n = get_node(cachep, page_to_nid(page));

	spin_lock(&n->list_lock);
	n->total_slabs++;
	if (!page->active) {
		list_add_tail(&page->lru, &(n->slabs_free));
		n->free_slabs++;
	} else
		fixup_slab_list(cachep, n, page, &list);

	STATS_INC_GROWN(cachep);
	n->free_objects += cachep->num - page->active;
	spin_unlock(&n->list_lock);

	fixup_objfreelist_debug(cachep, &list);
}

#if DEBUG

/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
static void kfree_debugcheck(const void *objp)
{
	if (!virt_addr_valid(objp)) {
		pr_err("kfree_debugcheck: out of range ptr %lxh\n",
		       (unsigned long)objp);
		BUG();
	}
}

static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
	unsigned long long redzone1, redzone2;

	redzone1 = *dbg_redzone1(cache, obj);
	redzone2 = *dbg_redzone2(cache, obj);

	/*
	 * Redzone is ok.
	 */
	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;

	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
		slab_error(cache, "memory outside object was overwritten");

	pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
	       obj, redzone1, redzone2);
}

static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
				   unsigned long caller)
{
	unsigned int objnr;
	struct page *page;

	BUG_ON(virt_to_cache(objp) != cachep);

	objp -= obj_offset(cachep);
	kfree_debugcheck(objp);
	page = virt_to_head_page(objp);

	if (cachep->flags & SLAB_RED_ZONE) {
		verify_redzone_free(cachep, objp);
		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
	}
	if (cachep->flags & SLAB_STORE_USER) {
		set_store_user_dirty(cachep);
		*dbg_userword(cachep, objp) = (void *)caller;
	}

	objnr = obj_to_index(cachep, page, objp);

	BUG_ON(objnr >= cachep->num);
	BUG_ON(objp != index_to_obj(cachep, page, objnr));

	if (cachep->flags & SLAB_POISON) {
		poison_obj(cachep, objp, POISON_FREE);
		slab_kernel_map(cachep, objp, 0, caller);
	}
	return objp;
}

#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
#endif
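
/*
 * Walk the chain of deferred objfreelist pointers collected while the
 * list_lock was held and poison them now that it is safe to do so.
 */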
static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list)
{
#if DEBUG
	void *next = *list;
	void *objp;

	while (next) {
		objp = next - obj_offset(cachep);
		next = *(void **)next;
		poison_obj(cachep, objp, POISON_FREE);
	}
#endif
}
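
/*
 * Move a slab onto the full or partial list to match its new page->active
 * count; a full OBJFREELIST slab also gives up its embedded freelist.
 */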
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list)
{
	/* move slabp to correct slabp list: */
	list_del(&page->lru);
	if (page->active == cachep->num) {
		list_add(&page->lru, &n->slabs_full);
		if (OBJFREELIST_SLAB(cachep)) {
#if DEBUG
			/* Poisoning will be done without holding the lock */
			if (cachep->flags & SLAB_POISON) {
				void **objp = page->freelist;

				*objp = *list;
				*list = objp;
			}
#endif
			page->freelist = NULL;
		}
	} else
		list_add(&page->lru, &n->slabs_partial);
}

/* Try to find non-pfmemalloc slab if needed */
static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
					struct page *page, bool pfmemalloc)
{
	if (!page)
		return NULL;

	if (pfmemalloc)
		return page;

	if (!PageSlabPfmemalloc(page))
		return page;

	/* No need to keep pfmemalloc slab if we have enough free objects */
	if (n->free_objects > n->free_limit) {
		ClearPageSlabPfmemalloc(page);
		return page;
	}

	/* Move pfmemalloc slab to the end of list to speed up next search */
	list_del(&page->lru);
	if (!page->active) {
		list_add_tail(&page->lru, &n->slabs_free);
		n->free_slabs++;
	} else
		list_add_tail(&page->lru, &n->slabs_partial);

	list_for_each_entry(page, &n->slabs_partial, lru) {
		if (!PageSlabPfmemalloc(page))
			return page;
	}

	n->free_touched = 1;
	list_for_each_entry(page, &n->slabs_free, lru) {
		if (!PageSlabPfmemalloc(page)) {
			n->free_slabs--;
			return page;
		}
	}

	return NULL;
}
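
/*
 * Pick the slab to allocate from: prefer a partial slab, then fall back
 * to a free one, skipping pfmemalloc slabs unless the caller may use them.
 */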
static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
{
	struct page *page;

	assert_spin_locked(&n->list_lock);
	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
	if (!page) {
		n->free_touched = 1;
		page = list_first_entry_or_null(&n->slabs_free, struct page,
						lru);
		if (page)
			n->free_slabs--;
	}

	if (sk_memalloc_socks())
		page = get_valid_first_slab(n, page, pfmemalloc);

	return page;
}
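
/*
 * Last-resort allocation from a pfmemalloc (memory reserve) slab, only
 * permitted when the gfp flags entitle the caller to dip into reserves.
 */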
static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
				struct kmem_cache_node *n, gfp_t flags)
{
	struct page *page;
	void *obj;
	void *list = NULL;

	if (!gfp_pfmemalloc_allowed(flags))
		return NULL;

	spin_lock(&n->list_lock);
	page = get_first_slab(n, true);
	if (!page) {
		spin_unlock(&n->list_lock);
		return NULL;
	}

	obj = slab_get_obj(cachep, page);
	n->free_objects--;

	fixup_slab_list(cachep, n, page, &list);

	spin_unlock(&n->list_lock);
	fixup_objfreelist_debug(cachep, &list);

	return obj;
}

/*
 * Slab list should be fixed up by fixup_slab_list() for existing slab
 * or cache_grow_end() for new slab
 */
static __always_inline int alloc_block(struct kmem_cache *cachep,
		struct array_cache *ac, struct page *page, int batchcount)
{
	/*
	 * There must be at least one object available for
	 * allocation.
	 */
	BUG_ON(page->active >= cachep->num);

	while (page->active < cachep->num && batchcount--) {
		STATS_INC_ALLOCED(cachep);
		STATS_INC_ACTIVE(cachep);
		STATS_SET_HIGH(cachep);

		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
	}

	return batchcount;
}
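
/*
 * Refill this CPU's array_cache after it has run empty: pull a batch of
 * objects from the shared array or the per-node slab lists, growing the
 * cache with fresh pages if nothing is available.
 */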
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
	int batchcount;
	struct kmem_cache_node *n;
	struct array_cache *ac, *shared;
	int node;
	void *list = NULL;
	struct page *page;

	check_irq_off();
	node = numa_mem_id();

	ac = cpu_cache_get(cachep);
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill.  Otherwise we could generate
		 * refill bouncing.
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
	n = get_node(cachep, node);

	BUG_ON(ac->avail > 0 || !n);
	shared = READ_ONCE(n->shared);
	if (!n->free_objects && (!shared || !shared->avail))
		goto direct_grow;

	spin_lock(&n->list_lock);
	shared = READ_ONCE(n->shared);

	/* See if we can refill from the shared array */
	if (shared && transfer_objects(ac, shared, batchcount)) {
		shared->touched = 1;
		goto alloc_done;
	}

	while (batchcount > 0) {
		/* Get the slab that the allocation is to come from. */
		page = get_first_slab(n, false);
		if (!page)
			goto must_grow;

		check_spinlock_acquired(cachep);

		batchcount = alloc_block(cachep, ac, page, batchcount);
		fixup_slab_list(cachep, n, page, &list);
	}

must_grow:
	n->free_objects -= ac->avail;
alloc_done:
	spin_unlock(&n->list_lock);
	fixup_objfreelist_debug(cachep, &list);

direct_grow:
	if (unlikely(!ac->avail)) {
		/* Check if we can use obj in pfmemalloc slab */
		if (sk_memalloc_socks()) {
			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);

			if (obj)
				return obj;
		}

		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);

		/*
		 * cache_grow_begin() can reenable interrupts,
		 * then ac could change.
		 */
		ac = cpu_cache_get(cachep);
		if (!ac->avail && page)
			alloc_block(cachep, ac, page, batchcount);
		cache_grow_end(cachep, page);

		if (!ac->avail)
			return NULL;
	}
	ac->touched = 1;

	return ac->entry[--ac->avail];
}

static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
						gfp_t flags)
{
	might_sleep_if(gfpflags_allow_blocking(flags));
}

#if DEBUG
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
				gfp_t flags, void *objp, unsigned long caller)
{
	if (!objp)
		return objp;
	if (cachep->flags & SLAB_POISON) {
		check_poison_obj(cachep, objp);
		slab_kernel_map(cachep, objp, 1, 0);
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
		*dbg_userword(cachep, objp) = (void *)caller;

	if (cachep->flags & SLAB_RED_ZONE) {
		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
			slab_error(cachep, "double free, or memory outside object was overwritten");
			pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
			       objp, *dbg_redzone1(cachep, objp),
			       *dbg_redzone2(cachep, objp));
		}
		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
	}

	objp += obj_offset(cachep);
	if (cachep->ctor && cachep->flags & SLAB_POISON)
		cachep->ctor(objp);
	if (ARCH_SLAB_MINALIGN &&
	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
		pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
		       objp, (int)ARCH_SLAB_MINALIGN);
	}
	return objp;
}
#else
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif
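
/*
 * Fast path of the allocator: take an object straight from this CPU's
 * array_cache when possible, falling back to cache_alloc_refill().
 */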
static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *objp;
	struct array_cache *ac;

	check_irq_off();

	ac = cpu_cache_get(cachep);
	if (likely(ac->avail)) {
		ac->touched = 1;
		objp = ac->entry[--ac->avail];

		STATS_INC_ALLOCHIT(cachep);
		goto out;
	}

	STATS_INC_ALLOCMISS(cachep);
	objp = cache_alloc_refill(cachep, flags);
	/*
	 * the 'ac' may be updated by cache_alloc_refill(),
	 * and kmemleak_erase() requires its correct value.
	 */
	ac = cpu_cache_get(cachep);

out:
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
	if (objp)
		kmemleak_erase(&ac->entry[ac->avail]);
	return objp;
}

#ifdef CONFIG_NUMA
/*
 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
 *
 * If we are in_interrupt, then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
 */
static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	int nid_alloc, nid_here;

	if (in_interrupt() || (flags & __GFP_THISNODE))
		return NULL;
	nid_alloc = nid_here = numa_mem_id();
	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
		nid_alloc = cpuset_slab_spread_node();
	else if (current->mempolicy)
		nid_alloc = mempolicy_slab_node();
	if (nid_alloc != nid_here)
		return ____cache_alloc_node(cachep, flags, nid_alloc);
	return NULL;
}
  2699. /*
  2700. * Fallback function if there was no memory available and no objects on a
2701. * certain node and fallback is permitted. First we scan all the
2702. * available nodes for available objects. If that fails then we
  2703. * perform an allocation without specifying a node. This allows the page
  2704. * allocator to do its reclaim / fallback magic. We then insert the
  2705. * slab into the proper nodelist and then allocate from it.
  2706. */
  2707. static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
  2708. {
  2709. struct zonelist *zonelist;
  2710. struct zoneref *z;
  2711. struct zone *zone;
  2712. enum zone_type high_zoneidx = gfp_zone(flags);
  2713. void *obj = NULL;
  2714. struct page *page;
  2715. int nid;
  2716. unsigned int cpuset_mems_cookie;
  2717. if (flags & __GFP_THISNODE)
  2718. return NULL;
  2719. retry_cpuset:
  2720. cpuset_mems_cookie = read_mems_allowed_begin();
  2721. zonelist = node_zonelist(mempolicy_slab_node(), flags);
  2722. retry:
  2723. /*
  2724. * Look through allowed nodes for objects available
  2725. * from existing per node queues.
  2726. */
  2727. for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
  2728. nid = zone_to_nid(zone);
  2729. if (cpuset_zone_allowed(zone, flags) &&
  2730. get_node(cache, nid) &&
  2731. get_node(cache, nid)->free_objects) {
  2732. obj = ____cache_alloc_node(cache,
  2733. gfp_exact_node(flags), nid);
  2734. if (obj)
  2735. break;
  2736. }
  2737. }
  2738. if (!obj) {
  2739. /*
  2740. * This allocation will be performed within the constraints
  2741. * of the current cpuset / memory policy requirements.
  2742. * We may trigger various forms of reclaim on the allowed
  2743. * set and go into memory reserves if necessary.
  2744. */
  2745. page = cache_grow_begin(cache, flags, numa_mem_id());
  2746. cache_grow_end(cache, page);
  2747. if (page) {
  2748. nid = page_to_nid(page);
  2749. obj = ____cache_alloc_node(cache,
  2750. gfp_exact_node(flags), nid);
  2751. /*
  2752. * Another processor may allocate the objects in
  2753. * the slab since we are not holding any locks.
  2754. */
  2755. if (!obj)
  2756. goto retry;
  2757. }
  2758. }
  2759. if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
  2760. goto retry_cpuset;
  2761. return obj;
  2762. }
  2763. /*
2764. * An interface to enable slab creation on a specific nodeid
  2765. */
  2766. static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
  2767. int nodeid)
  2768. {
  2769. struct page *page;
  2770. struct kmem_cache_node *n;
  2771. void *obj = NULL;
  2772. void *list = NULL;
  2773. VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
  2774. n = get_node(cachep, nodeid);
  2775. BUG_ON(!n);
  2776. check_irq_off();
  2777. spin_lock(&n->list_lock);
  2778. page = get_first_slab(n, false);
  2779. if (!page)
  2780. goto must_grow;
  2781. check_spinlock_acquired_node(cachep, nodeid);
  2782. STATS_INC_NODEALLOCS(cachep);
  2783. STATS_INC_ACTIVE(cachep);
  2784. STATS_SET_HIGH(cachep);
  2785. BUG_ON(page->active == cachep->num);
  2786. obj = slab_get_obj(cachep, page);
  2787. n->free_objects--;
  2788. fixup_slab_list(cachep, n, page, &list);
  2789. spin_unlock(&n->list_lock);
  2790. fixup_objfreelist_debug(cachep, &list);
  2791. return obj;
  2792. must_grow:
  2793. spin_unlock(&n->list_lock);
  2794. page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
  2795. if (page) {
  2796. /* This slab isn't counted yet so don't update free_objects */
  2797. obj = slab_get_obj(cachep, page);
  2798. }
  2799. cache_grow_end(cachep, page);
  2800. return obj ? obj : fallback_alloc(cachep, flags);
  2801. }
  2802. static __always_inline void *
  2803. slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
  2804. unsigned long caller)
  2805. {
  2806. unsigned long save_flags;
  2807. void *ptr;
  2808. int slab_node = numa_mem_id();
  2809. flags &= gfp_allowed_mask;
  2810. cachep = slab_pre_alloc_hook(cachep, flags);
  2811. if (unlikely(!cachep))
  2812. return NULL;
  2813. cache_alloc_debugcheck_before(cachep, flags);
  2814. local_irq_save(save_flags);
  2815. if (nodeid == NUMA_NO_NODE)
  2816. nodeid = slab_node;
  2817. if (unlikely(!get_node(cachep, nodeid))) {
  2818. /* Node not bootstrapped yet */
  2819. ptr = fallback_alloc(cachep, flags);
  2820. goto out;
  2821. }
  2822. if (nodeid == slab_node) {
  2823. /*
  2824. * Use the locally cached objects if possible.
  2825. * However ____cache_alloc does not allow fallback
  2826. * to other nodes. It may fail while we still have
  2827. * objects on other nodes available.
  2828. */
  2829. ptr = ____cache_alloc(cachep, flags);
  2830. if (ptr)
  2831. goto out;
  2832. }
  2833. /* ___cache_alloc_node can fall back to other nodes */
  2834. ptr = ____cache_alloc_node(cachep, flags, nodeid);
  2835. out:
  2836. local_irq_restore(save_flags);
  2837. ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
  2838. if (unlikely(flags & __GFP_ZERO) && ptr)
  2839. memset(ptr, 0, cachep->object_size);
  2840. slab_post_alloc_hook(cachep, flags, 1, &ptr);
  2841. return ptr;
  2842. }
  2843. static __always_inline void *
  2844. __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
  2845. {
  2846. void *objp;
  2847. if (current->mempolicy || cpuset_do_slab_mem_spread()) {
  2848. objp = alternate_node_alloc(cache, flags);
  2849. if (objp)
  2850. goto out;
  2851. }
  2852. objp = ____cache_alloc(cache, flags);
  2853. /*
  2854. * We may just have run out of memory on the local node.
  2855. * ____cache_alloc_node() knows how to locate memory on other nodes
  2856. */
  2857. if (!objp)
  2858. objp = ____cache_alloc_node(cache, flags, numa_mem_id());
  2859. out:
  2860. return objp;
  2861. }
  2862. #else
  2863. static __always_inline void *
  2864. __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
  2865. {
  2866. return ____cache_alloc(cachep, flags);
  2867. }
  2868. #endif /* CONFIG_NUMA */
  2869. static __always_inline void *
  2870. slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
  2871. {
  2872. unsigned long save_flags;
  2873. void *objp;
  2874. flags &= gfp_allowed_mask;
  2875. cachep = slab_pre_alloc_hook(cachep, flags);
  2876. if (unlikely(!cachep))
  2877. return NULL;
  2878. cache_alloc_debugcheck_before(cachep, flags);
  2879. local_irq_save(save_flags);
  2880. objp = __do_cache_alloc(cachep, flags);
  2881. local_irq_restore(save_flags);
  2882. objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
  2883. prefetchw(objp);
  2884. if (unlikely(flags & __GFP_ZERO) && objp)
  2885. memset(objp, 0, cachep->object_size);
  2886. slab_post_alloc_hook(cachep, flags, 1, &objp);
  2887. return objp;
  2888. }
  2889. /*
2890. * Caller must hold the correct kmem_cache_node's list_lock.
2891. * @list: list of detached free slabs; these should be freed by the caller
  2892. */
  2893. static void free_block(struct kmem_cache *cachep, void **objpp,
  2894. int nr_objects, int node, struct list_head *list)
  2895. {
  2896. int i;
  2897. struct kmem_cache_node *n = get_node(cachep, node);
  2898. struct page *page;
  2899. n->free_objects += nr_objects;
  2900. for (i = 0; i < nr_objects; i++) {
  2901. void *objp;
  2902. struct page *page;
  2903. objp = objpp[i];
  2904. page = virt_to_head_page(objp);
  2905. list_del(&page->lru);
  2906. check_spinlock_acquired_node(cachep, node);
  2907. slab_put_obj(cachep, page, objp);
  2908. STATS_DEC_ACTIVE(cachep);
  2909. /* fixup slab chains */
  2910. if (page->active == 0) {
  2911. list_add(&page->lru, &n->slabs_free);
  2912. n->free_slabs++;
  2913. } else {
  2914. /* Unconditionally move a slab to the end of the
  2915. * partial list on free - maximum time for the
  2916. * other objects to be freed, too.
  2917. */
  2918. list_add_tail(&page->lru, &n->slabs_partial);
  2919. }
  2920. }
  2921. while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
  2922. n->free_objects -= cachep->num;
  2923. page = list_last_entry(&n->slabs_free, struct page, lru);
  2924. list_move(&page->lru, list);
  2925. n->free_slabs--;
  2926. n->total_slabs--;
  2927. }
  2928. }
  2929. static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
  2930. {
  2931. int batchcount;
  2932. struct kmem_cache_node *n;
  2933. int node = numa_mem_id();
  2934. LIST_HEAD(list);
  2935. batchcount = ac->batchcount;
  2936. check_irq_off();
  2937. n = get_node(cachep, node);
  2938. spin_lock(&n->list_lock);
  2939. if (n->shared) {
  2940. struct array_cache *shared_array = n->shared;
  2941. int max = shared_array->limit - shared_array->avail;
  2942. if (max) {
  2943. if (batchcount > max)
  2944. batchcount = max;
  2945. memcpy(&(shared_array->entry[shared_array->avail]),
  2946. ac->entry, sizeof(void *) * batchcount);
  2947. shared_array->avail += batchcount;
  2948. goto free_done;
  2949. }
  2950. }
  2951. free_block(cachep, ac->entry, batchcount, node, &list);
  2952. free_done:
  2953. #if STATS
  2954. {
  2955. int i = 0;
  2956. struct page *page;
  2957. list_for_each_entry(page, &n->slabs_free, lru) {
  2958. BUG_ON(page->active);
  2959. i++;
  2960. }
  2961. STATS_SET_FREEABLE(cachep, i);
  2962. }
  2963. #endif
  2964. spin_unlock(&n->list_lock);
  2965. slabs_destroy(cachep, &list);
  2966. ac->avail -= batchcount;
  2967. memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
  2968. }
  2969. /*
  2970. * Release an obj back to its cache. If the obj has a constructed state, it must
2971. * be in this state _before_ it is released. Called with interrupts disabled.
  2972. */
  2973. static inline void __cache_free(struct kmem_cache *cachep, void *objp,
  2974. unsigned long caller)
  2975. {
  2976. /* Put the object into the quarantine, don't touch it for now. */
  2977. if (kasan_slab_free(cachep, objp))
  2978. return;
  2979. ___cache_free(cachep, objp, caller);
  2980. }
  2981. void ___cache_free(struct kmem_cache *cachep, void *objp,
  2982. unsigned long caller)
  2983. {
  2984. struct array_cache *ac = cpu_cache_get(cachep);
  2985. check_irq_off();
  2986. kmemleak_free_recursive(objp, cachep->flags);
  2987. objp = cache_free_debugcheck(cachep, objp, caller);
  2988. kmemcheck_slab_free(cachep, objp, cachep->object_size);
  2989. /*
2990. * Skip calling cache_free_alien() when the platform is not NUMA.
2991. * This avoids the cache misses that happen while accessing slabp (which
2992. * is a per-page memory reference) to get the nodeid. Instead use a global
2993. * variable to skip the call; it is most likely to already be present in
2994. * the cache.
  2995. */
  2996. if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
  2997. return;
  2998. if (ac->avail < ac->limit) {
  2999. STATS_INC_FREEHIT(cachep);
  3000. } else {
  3001. STATS_INC_FREEMISS(cachep);
  3002. cache_flusharray(cachep, ac);
  3003. }
  3004. if (sk_memalloc_socks()) {
  3005. struct page *page = virt_to_head_page(objp);
  3006. if (unlikely(PageSlabPfmemalloc(page))) {
  3007. cache_free_pfmemalloc(cachep, page, objp);
  3008. return;
  3009. }
  3010. }
  3011. ac->entry[ac->avail++] = objp;
  3012. }
  3013. /**
  3014. * kmem_cache_alloc - Allocate an object
  3015. * @cachep: The cache to allocate from.
  3016. * @flags: See kmalloc().
  3017. *
  3018. * Allocate an object from this cache. The flags are only relevant
  3019. * if the cache has no available objects.
  3020. */
  3021. void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
  3022. {
  3023. void *ret = slab_alloc(cachep, flags, _RET_IP_);
  3024. kasan_slab_alloc(cachep, ret, flags);
  3025. trace_kmem_cache_alloc(_RET_IP_, ret,
  3026. cachep->object_size, cachep->size, flags);
  3027. return ret;
  3028. }
  3029. EXPORT_SYMBOL(kmem_cache_alloc);
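/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the usual pattern for a client of this allocator. The cache name
 * "example_cache" and the object size are assumptions made only for the
 * example; everything used here is declared in <linux/slab.h>.
 */
static void __maybe_unused example_kmem_cache_usage(void)
{
	struct kmem_cache *cache;
	void *obj;

	cache = kmem_cache_create("example_cache", 64, 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cache)
		return;

	obj = kmem_cache_alloc(cache, GFP_KERNEL);	/* flags as for kmalloc() */
	if (obj)
		kmem_cache_free(cache, obj);

	kmem_cache_destroy(cache);
}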
  3030. static __always_inline void
  3031. cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
  3032. size_t size, void **p, unsigned long caller)
  3033. {
  3034. size_t i;
  3035. for (i = 0; i < size; i++)
  3036. p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
  3037. }
  3038. int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
  3039. void **p)
  3040. {
  3041. size_t i;
  3042. s = slab_pre_alloc_hook(s, flags);
  3043. if (!s)
  3044. return 0;
  3045. cache_alloc_debugcheck_before(s, flags);
  3046. local_irq_disable();
  3047. for (i = 0; i < size; i++) {
  3048. void *objp = __do_cache_alloc(s, flags);
  3049. if (unlikely(!objp))
  3050. goto error;
  3051. p[i] = objp;
  3052. }
  3053. local_irq_enable();
  3054. cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
  3055. /* Clear memory outside IRQ disabled section */
  3056. if (unlikely(flags & __GFP_ZERO))
  3057. for (i = 0; i < size; i++)
  3058. memset(p[i], 0, s->object_size);
  3059. slab_post_alloc_hook(s, flags, size, p);
  3060. /* FIXME: Trace call missing. Christoph would like a bulk variant */
  3061. return size;
  3062. error:
  3063. local_irq_enable();
  3064. cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
  3065. slab_post_alloc_hook(s, flags, i, p);
  3066. __kmem_cache_free_bulk(s, i, p);
  3067. return 0;
  3068. }
  3069. EXPORT_SYMBOL(kmem_cache_alloc_bulk);
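/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * bulk allocation pays the IRQ-disable/enable and hook costs once for a
 * whole array of objects. "cache" is assumed to be a cache created
 * elsewhere; on failure kmem_cache_alloc_bulk() returns 0 and has already
 * freed any partially allocated objects, so no cleanup is needed.
 */
static void __maybe_unused example_bulk_usage(struct kmem_cache *cache)
{
	void *objs[16];

	if (kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
		/* ... use the objects ... */
		kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
	}
}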
  3070. #ifdef CONFIG_TRACING
  3071. void *
  3072. kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
  3073. {
  3074. void *ret;
  3075. ret = slab_alloc(cachep, flags, _RET_IP_);
  3076. kasan_kmalloc(cachep, ret, size, flags);
  3077. trace_kmalloc(_RET_IP_, ret,
  3078. size, cachep->size, flags);
  3079. return ret;
  3080. }
  3081. EXPORT_SYMBOL(kmem_cache_alloc_trace);
  3082. #endif
  3083. #ifdef CONFIG_NUMA
  3084. /**
  3085. * kmem_cache_alloc_node - Allocate an object on the specified node
  3086. * @cachep: The cache to allocate from.
  3087. * @flags: See kmalloc().
  3088. * @nodeid: node number of the target node.
  3089. *
  3090. * Identical to kmem_cache_alloc but it will allocate memory on the given
  3091. * node, which can improve the performance for cpu bound structures.
  3092. *
3093. * Fallback to other nodes is possible if __GFP_THISNODE is not set.
  3094. */
  3095. void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
  3096. {
  3097. void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
  3098. kasan_slab_alloc(cachep, ret, flags);
  3099. trace_kmem_cache_alloc_node(_RET_IP_, ret,
  3100. cachep->object_size, cachep->size,
  3101. flags, nodeid);
  3102. return ret;
  3103. }
  3104. EXPORT_SYMBOL(kmem_cache_alloc_node);
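/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * placing an object on the node of the CPU that will touch it. The "cache"
 * argument and the use of cpu_to_node() are assumptions for the example;
 * without __GFP_THISNODE the allocation may still fall back to other nodes.
 */
static void __maybe_unused example_node_alloc(struct kmem_cache *cache, int cpu)
{
	void *obj = kmem_cache_alloc_node(cache, GFP_KERNEL, cpu_to_node(cpu));

	if (obj)
		kmem_cache_free(cache, obj);
}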
  3105. #ifdef CONFIG_TRACING
  3106. void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
  3107. gfp_t flags,
  3108. int nodeid,
  3109. size_t size)
  3110. {
  3111. void *ret;
  3112. ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
  3113. kasan_kmalloc(cachep, ret, size, flags);
  3114. trace_kmalloc_node(_RET_IP_, ret,
  3115. size, cachep->size,
  3116. flags, nodeid);
  3117. return ret;
  3118. }
  3119. EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
  3120. #endif
  3121. static __always_inline void *
  3122. __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
  3123. {
  3124. struct kmem_cache *cachep;
  3125. void *ret;
  3126. cachep = kmalloc_slab(size, flags);
  3127. if (unlikely(ZERO_OR_NULL_PTR(cachep)))
  3128. return cachep;
  3129. ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
  3130. kasan_kmalloc(cachep, ret, size, flags);
  3131. return ret;
  3132. }
  3133. void *__kmalloc_node(size_t size, gfp_t flags, int node)
  3134. {
  3135. return __do_kmalloc_node(size, flags, node, _RET_IP_);
  3136. }
  3137. EXPORT_SYMBOL(__kmalloc_node);
  3138. void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
  3139. int node, unsigned long caller)
  3140. {
  3141. return __do_kmalloc_node(size, flags, node, caller);
  3142. }
  3143. EXPORT_SYMBOL(__kmalloc_node_track_caller);
  3144. #endif /* CONFIG_NUMA */
  3145. /**
  3146. * __do_kmalloc - allocate memory
  3147. * @size: how many bytes of memory are required.
  3148. * @flags: the type of memory to allocate (see kmalloc).
3149. * @caller: caller address, used for debug tracking of the allocation
  3150. */
  3151. static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
  3152. unsigned long caller)
  3153. {
  3154. struct kmem_cache *cachep;
  3155. void *ret;
  3156. cachep = kmalloc_slab(size, flags);
  3157. if (unlikely(ZERO_OR_NULL_PTR(cachep)))
  3158. return cachep;
  3159. ret = slab_alloc(cachep, flags, caller);
  3160. kasan_kmalloc(cachep, ret, size, flags);
  3161. trace_kmalloc(caller, ret,
  3162. size, cachep->size, flags);
  3163. return ret;
  3164. }
  3165. void *__kmalloc(size_t size, gfp_t flags)
  3166. {
  3167. return __do_kmalloc(size, flags, _RET_IP_);
  3168. }
  3169. EXPORT_SYMBOL(__kmalloc);
  3170. void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
  3171. {
  3172. return __do_kmalloc(size, flags, caller);
  3173. }
  3174. EXPORT_SYMBOL(__kmalloc_track_caller);
  3175. /**
  3176. * kmem_cache_free - Deallocate an object
  3177. * @cachep: The cache the allocation was from.
  3178. * @objp: The previously allocated object.
  3179. *
  3180. * Free an object which was previously allocated from this
  3181. * cache.
  3182. */
  3183. void kmem_cache_free(struct kmem_cache *cachep, void *objp)
  3184. {
  3185. unsigned long flags;
  3186. cachep = cache_from_obj(cachep, objp);
  3187. if (!cachep)
  3188. return;
  3189. local_irq_save(flags);
  3190. debug_check_no_locks_freed(objp, cachep->object_size);
  3191. if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
  3192. debug_check_no_obj_freed(objp, cachep->object_size);
  3193. __cache_free(cachep, objp, _RET_IP_);
  3194. local_irq_restore(flags);
  3195. trace_kmem_cache_free(_RET_IP_, objp);
  3196. }
  3197. EXPORT_SYMBOL(kmem_cache_free);
  3198. void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
  3199. {
  3200. struct kmem_cache *s;
  3201. size_t i;
  3202. local_irq_disable();
  3203. for (i = 0; i < size; i++) {
  3204. void *objp = p[i];
  3205. if (!orig_s) /* called via kfree_bulk */
  3206. s = virt_to_cache(objp);
  3207. else
  3208. s = cache_from_obj(orig_s, objp);
  3209. debug_check_no_locks_freed(objp, s->object_size);
  3210. if (!(s->flags & SLAB_DEBUG_OBJECTS))
  3211. debug_check_no_obj_freed(objp, s->object_size);
  3212. __cache_free(s, objp, _RET_IP_);
  3213. }
  3214. local_irq_enable();
  3215. /* FIXME: add tracing */
  3216. }
  3217. EXPORT_SYMBOL(kmem_cache_free_bulk);
  3218. /**
  3219. * kfree - free previously allocated memory
  3220. * @objp: pointer returned by kmalloc.
  3221. *
  3222. * If @objp is NULL, no operation is performed.
  3223. *
  3224. * Don't free memory not originally allocated by kmalloc()
  3225. * or you will run into trouble.
  3226. */
  3227. void kfree(const void *objp)
  3228. {
  3229. struct kmem_cache *c;
  3230. unsigned long flags;
  3231. trace_kfree(_RET_IP_, objp);
  3232. if (unlikely(ZERO_OR_NULL_PTR(objp)))
  3233. return;
  3234. local_irq_save(flags);
  3235. kfree_debugcheck(objp);
  3236. c = virt_to_cache(objp);
  3237. debug_check_no_locks_freed(objp, c->object_size);
  3238. debug_check_no_obj_freed(objp, c->object_size);
  3239. __cache_free(c, (void *)objp, _RET_IP_);
  3240. local_irq_restore(flags);
  3241. }
  3242. EXPORT_SYMBOL(kfree);
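/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the kmalloc()/kfree() pairing that lands in __do_kmalloc() and
 * __cache_free() above. Since kfree(NULL) is a no-op, error paths need no
 * NULL check before freeing.
 */
static void __maybe_unused example_kmalloc_usage(void)
{
	char *buf = kmalloc(128, GFP_KERNEL);

	if (!buf)
		return;
	/* ... use buf ... */
	kfree(buf);
}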
  3243. /*
  3244. * This initializes kmem_cache_node or resizes various caches for all nodes.
  3245. */
  3246. static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
  3247. {
  3248. int ret;
  3249. int node;
  3250. struct kmem_cache_node *n;
  3251. for_each_online_node(node) {
  3252. ret = setup_kmem_cache_node(cachep, node, gfp, true);
  3253. if (ret)
  3254. goto fail;
  3255. }
  3256. return 0;
  3257. fail:
  3258. if (!cachep->list.next) {
  3259. /* Cache is not active yet. Roll back what we did */
  3260. node--;
  3261. while (node >= 0) {
  3262. n = get_node(cachep, node);
  3263. if (n) {
  3264. kfree(n->shared);
  3265. free_alien_cache(n->alien);
  3266. kfree(n);
  3267. cachep->node[node] = NULL;
  3268. }
  3269. node--;
  3270. }
  3271. }
  3272. return -ENOMEM;
  3273. }
  3274. /* Always called with the slab_mutex held */
  3275. static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
  3276. int batchcount, int shared, gfp_t gfp)
  3277. {
  3278. struct array_cache __percpu *cpu_cache, *prev;
  3279. int cpu;
  3280. cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
  3281. if (!cpu_cache)
  3282. return -ENOMEM;
  3283. prev = cachep->cpu_cache;
  3284. cachep->cpu_cache = cpu_cache;
  3285. kick_all_cpus_sync();
  3286. check_irq_on();
  3287. cachep->batchcount = batchcount;
  3288. cachep->limit = limit;
  3289. cachep->shared = shared;
  3290. if (!prev)
  3291. goto setup_node;
  3292. for_each_online_cpu(cpu) {
  3293. LIST_HEAD(list);
  3294. int node;
  3295. struct kmem_cache_node *n;
  3296. struct array_cache *ac = per_cpu_ptr(prev, cpu);
  3297. node = cpu_to_mem(cpu);
  3298. n = get_node(cachep, node);
  3299. spin_lock_irq(&n->list_lock);
  3300. free_block(cachep, ac->entry, ac->avail, node, &list);
  3301. spin_unlock_irq(&n->list_lock);
  3302. slabs_destroy(cachep, &list);
  3303. }
  3304. free_percpu(prev);
  3305. setup_node:
  3306. return setup_kmem_cache_nodes(cachep, gfp);
  3307. }
  3308. static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
  3309. int batchcount, int shared, gfp_t gfp)
  3310. {
  3311. int ret;
  3312. struct kmem_cache *c;
  3313. ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
  3314. if (slab_state < FULL)
  3315. return ret;
  3316. if ((ret < 0) || !is_root_cache(cachep))
  3317. return ret;
  3318. lockdep_assert_held(&slab_mutex);
  3319. for_each_memcg_cache(c, cachep) {
  3320. /* return value determined by the root cache only */
  3321. __do_tune_cpucache(c, limit, batchcount, shared, gfp);
  3322. }
  3323. return ret;
  3324. }
  3325. /* Called with slab_mutex held always */
  3326. static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
  3327. {
  3328. int err;
  3329. int limit = 0;
  3330. int shared = 0;
  3331. int batchcount = 0;
  3332. err = cache_random_seq_create(cachep, cachep->num, gfp);
  3333. if (err)
  3334. goto end;
  3335. if (!is_root_cache(cachep)) {
  3336. struct kmem_cache *root = memcg_root_cache(cachep);
  3337. limit = root->limit;
  3338. shared = root->shared;
  3339. batchcount = root->batchcount;
  3340. }
  3341. if (limit && shared && batchcount)
  3342. goto skip_setup;
  3343. /*
  3344. * The head array serves three purposes:
  3345. * - create a LIFO ordering, i.e. return objects that are cache-warm
  3346. * - reduce the number of spinlock operations.
  3347. * - reduce the number of linked list operations on the slab and
  3348. * bufctl chains: array operations are cheaper.
  3349. * The numbers are guessed, we should auto-tune as described by
  3350. * Bonwick.
  3351. */
  3352. if (cachep->size > 131072)
  3353. limit = 1;
  3354. else if (cachep->size > PAGE_SIZE)
  3355. limit = 8;
  3356. else if (cachep->size > 1024)
  3357. limit = 24;
  3358. else if (cachep->size > 256)
  3359. limit = 54;
  3360. else
  3361. limit = 120;
  3362. /*
  3363. * CPU bound tasks (e.g. network routing) can exhibit cpu bound
  3364. * allocation behaviour: Most allocs on one cpu, most free operations
  3365. * on another cpu. For these cases, an efficient object passing between
  3366. * cpus is necessary. This is provided by a shared array. The array
  3367. * replaces Bonwick's magazine layer.
  3368. * On uniprocessor, it's functionally equivalent (but less efficient)
  3369. * to a larger limit. Thus disabled by default.
  3370. */
  3371. shared = 0;
  3372. if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
  3373. shared = 8;
  3374. #if DEBUG
  3375. /*
3376. * With debugging enabled, a large batchcount leads to excessively long
3377. * periods with local interrupts disabled. Limit the batchcount.
  3378. */
  3379. if (limit > 32)
  3380. limit = 32;
  3381. #endif
  3382. batchcount = (limit + 1) / 2;
  3383. skip_setup:
  3384. err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
  3385. end:
  3386. if (err)
  3387. pr_err("enable_cpucache failed for %s, error %d\n",
  3388. cachep->name, -err);
  3389. return err;
  3390. }
  3391. /*
3392. * Drain an array if it contains any elements, taking the node lock only if
3393. * necessary. Note that the node list_lock also protects the array_cache
  3394. * if drain_array() is used on the shared array.
  3395. */
  3396. static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
  3397. struct array_cache *ac, int node)
  3398. {
  3399. LIST_HEAD(list);
  3400. /* ac from n->shared can be freed if we don't hold the slab_mutex. */
  3401. check_mutex_acquired();
  3402. if (!ac || !ac->avail)
  3403. return;
  3404. if (ac->touched) {
  3405. ac->touched = 0;
  3406. return;
  3407. }
  3408. spin_lock_irq(&n->list_lock);
  3409. drain_array_locked(cachep, ac, node, false, &list);
  3410. spin_unlock_irq(&n->list_lock);
  3411. slabs_destroy(cachep, &list);
  3412. }
  3413. /**
  3414. * cache_reap - Reclaim memory from caches.
  3415. * @w: work descriptor
  3416. *
  3417. * Called from workqueue/eventd every few seconds.
  3418. * Purpose:
  3419. * - clear the per-cpu caches for this CPU.
  3420. * - return freeable pages to the main free memory pool.
  3421. *
  3422. * If we cannot acquire the cache chain mutex then just give up - we'll try
  3423. * again on the next iteration.
  3424. */
  3425. static void cache_reap(struct work_struct *w)
  3426. {
  3427. struct kmem_cache *searchp;
  3428. struct kmem_cache_node *n;
  3429. int node = numa_mem_id();
  3430. struct delayed_work *work = to_delayed_work(w);
  3431. if (!mutex_trylock(&slab_mutex))
3432. /* Give up. Set up the next iteration. */
  3433. goto out;
  3434. list_for_each_entry(searchp, &slab_caches, list) {
  3435. check_irq_on();
  3436. /*
  3437. * We only take the node lock if absolutely necessary and we
  3438. * have established with reasonable certainty that
  3439. * we can do some work if the lock was obtained.
  3440. */
  3441. n = get_node(searchp, node);
  3442. reap_alien(searchp, n);
  3443. drain_array(searchp, n, cpu_cache_get(searchp), node);
  3444. /*
  3445. * These are racy checks but it does not matter
  3446. * if we skip one check or scan twice.
  3447. */
  3448. if (time_after(n->next_reap, jiffies))
  3449. goto next;
  3450. n->next_reap = jiffies + REAPTIMEOUT_NODE;
  3451. drain_array(searchp, n, n->shared, node);
  3452. if (n->free_touched)
  3453. n->free_touched = 0;
  3454. else {
  3455. int freed;
  3456. freed = drain_freelist(searchp, n, (n->free_limit +
  3457. 5 * searchp->num - 1) / (5 * searchp->num));
  3458. STATS_ADD_REAPED(searchp, freed);
  3459. }
  3460. next:
  3461. cond_resched();
  3462. }
  3463. check_irq_on();
  3464. mutex_unlock(&slab_mutex);
  3465. next_reap_node();
  3466. out:
  3467. /* Set up the next iteration */
  3468. schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
  3469. }
  3470. #ifdef CONFIG_SLABINFO
  3471. void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
  3472. {
  3473. unsigned long active_objs, num_objs, active_slabs;
  3474. unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
  3475. unsigned long free_slabs = 0;
  3476. int node;
  3477. struct kmem_cache_node *n;
  3478. for_each_kmem_cache_node(cachep, node, n) {
  3479. check_irq_on();
  3480. spin_lock_irq(&n->list_lock);
  3481. total_slabs += n->total_slabs;
  3482. free_slabs += n->free_slabs;
  3483. free_objs += n->free_objects;
  3484. if (n->shared)
  3485. shared_avail += n->shared->avail;
  3486. spin_unlock_irq(&n->list_lock);
  3487. }
  3488. num_objs = total_slabs * cachep->num;
  3489. active_slabs = total_slabs - free_slabs;
  3490. active_objs = num_objs - free_objs;
  3491. sinfo->active_objs = active_objs;
  3492. sinfo->num_objs = num_objs;
  3493. sinfo->active_slabs = active_slabs;
  3494. sinfo->num_slabs = total_slabs;
  3495. sinfo->shared_avail = shared_avail;
  3496. sinfo->limit = cachep->limit;
  3497. sinfo->batchcount = cachep->batchcount;
  3498. sinfo->shared = cachep->shared;
  3499. sinfo->objects_per_slab = cachep->num;
  3500. sinfo->cache_order = cachep->gfporder;
  3501. }
  3502. void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
  3503. {
  3504. #if STATS
  3505. { /* node stats */
  3506. unsigned long high = cachep->high_mark;
  3507. unsigned long allocs = cachep->num_allocations;
  3508. unsigned long grown = cachep->grown;
  3509. unsigned long reaped = cachep->reaped;
  3510. unsigned long errors = cachep->errors;
  3511. unsigned long max_freeable = cachep->max_freeable;
  3512. unsigned long node_allocs = cachep->node_allocs;
  3513. unsigned long node_frees = cachep->node_frees;
  3514. unsigned long overflows = cachep->node_overflow;
  3515. seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
  3516. allocs, high, grown,
  3517. reaped, errors, max_freeable, node_allocs,
  3518. node_frees, overflows);
  3519. }
  3520. /* cpu stats */
  3521. {
  3522. unsigned long allochit = atomic_read(&cachep->allochit);
  3523. unsigned long allocmiss = atomic_read(&cachep->allocmiss);
  3524. unsigned long freehit = atomic_read(&cachep->freehit);
  3525. unsigned long freemiss = atomic_read(&cachep->freemiss);
  3526. seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
  3527. allochit, allocmiss, freehit, freemiss);
  3528. }
  3529. #endif
  3530. }
  3531. #define MAX_SLABINFO_WRITE 128
  3532. /**
  3533. * slabinfo_write - Tuning for the slab allocator
  3534. * @file: unused
  3535. * @buffer: user buffer
  3536. * @count: data length
  3537. * @ppos: unused
  3538. */
  3539. ssize_t slabinfo_write(struct file *file, const char __user *buffer,
  3540. size_t count, loff_t *ppos)
  3541. {
  3542. char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
  3543. int limit, batchcount, shared, res;
  3544. struct kmem_cache *cachep;
  3545. if (count > MAX_SLABINFO_WRITE)
  3546. return -EINVAL;
  3547. if (copy_from_user(&kbuf, buffer, count))
  3548. return -EFAULT;
  3549. kbuf[MAX_SLABINFO_WRITE] = '\0';
  3550. tmp = strchr(kbuf, ' ');
  3551. if (!tmp)
  3552. return -EINVAL;
  3553. *tmp = '\0';
  3554. tmp++;
  3555. if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
  3556. return -EINVAL;
  3557. /* Find the cache in the chain of caches. */
  3558. mutex_lock(&slab_mutex);
  3559. res = -EINVAL;
  3560. list_for_each_entry(cachep, &slab_caches, list) {
  3561. if (!strcmp(cachep->name, kbuf)) {
  3562. if (limit < 1 || batchcount < 1 ||
  3563. batchcount > limit || shared < 0) {
  3564. res = 0;
  3565. } else {
  3566. res = do_tune_cpucache(cachep, limit,
  3567. batchcount, shared,
  3568. GFP_KERNEL);
  3569. }
  3570. break;
  3571. }
  3572. }
  3573. mutex_unlock(&slab_mutex);
  3574. if (res >= 0)
  3575. res = count;
  3576. return res;
  3577. }
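/*
 * Editorial note (not part of the original file): the accepted input is a
 * single line of the form "<cache name> <limit> <batchcount> <shared>", so
 * writing e.g. "dentry 120 60 8" to the slabinfo proc file retunes the
 * per-cpu arrays for the "dentry" cache (the numbers are made up for
 * illustration and must satisfy limit >= 1, 1 <= batchcount <= limit,
 * shared >= 0).
 */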
  3578. #ifdef CONFIG_DEBUG_SLAB_LEAK
  3579. static inline int add_caller(unsigned long *n, unsigned long v)
  3580. {
  3581. unsigned long *p;
  3582. int l;
  3583. if (!v)
  3584. return 1;
  3585. l = n[1];
  3586. p = n + 2;
  3587. while (l) {
  3588. int i = l/2;
  3589. unsigned long *q = p + 2 * i;
  3590. if (*q == v) {
  3591. q[1]++;
  3592. return 1;
  3593. }
  3594. if (*q > v) {
  3595. l = i;
  3596. } else {
  3597. p = q + 2;
  3598. l -= i + 1;
  3599. }
  3600. }
  3601. if (++n[1] == n[0])
  3602. return 0;
  3603. memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
  3604. p[0] = v;
  3605. p[1] = 1;
  3606. return 1;
  3607. }
  3608. static void handle_slab(unsigned long *n, struct kmem_cache *c,
  3609. struct page *page)
  3610. {
  3611. void *p;
  3612. int i, j;
  3613. unsigned long v;
  3614. if (n[0] == n[1])
  3615. return;
  3616. for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
  3617. bool active = true;
  3618. for (j = page->active; j < c->num; j++) {
  3619. if (get_free_obj(page, j) == i) {
  3620. active = false;
  3621. break;
  3622. }
  3623. }
  3624. if (!active)
  3625. continue;
  3626. /*
3627. * probe_kernel_read() is used for DEBUG_PAGEALLOC: the page table
3628. * mapping is only established when the object is actually allocated,
3629. * so we could otherwise mistakenly access an unmapped object through
3630. * the cpu cache.
  3631. */
  3632. if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
  3633. continue;
  3634. if (!add_caller(n, v))
  3635. return;
  3636. }
  3637. }
  3638. static void show_symbol(struct seq_file *m, unsigned long address)
  3639. {
  3640. #ifdef CONFIG_KALLSYMS
  3641. unsigned long offset, size;
  3642. char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
  3643. if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
  3644. seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
  3645. if (modname[0])
  3646. seq_printf(m, " [%s]", modname);
  3647. return;
  3648. }
  3649. #endif
  3650. seq_printf(m, "%p", (void *)address);
  3651. }
  3652. static int leaks_show(struct seq_file *m, void *p)
  3653. {
  3654. struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
  3655. struct page *page;
  3656. struct kmem_cache_node *n;
  3657. const char *name;
  3658. unsigned long *x = m->private;
  3659. int node;
  3660. int i;
  3661. if (!(cachep->flags & SLAB_STORE_USER))
  3662. return 0;
  3663. if (!(cachep->flags & SLAB_RED_ZONE))
  3664. return 0;
  3665. /*
  3666. * Set store_user_clean and start to grab stored user information
3667. * for all objects on this cache. If any alloc/free requests come in
3668. * during the processing, the information would be wrong, so restart
3669. * the whole scan.
  3670. */
  3671. do {
  3672. set_store_user_clean(cachep);
  3673. drain_cpu_caches(cachep);
  3674. x[1] = 0;
  3675. for_each_kmem_cache_node(cachep, node, n) {
  3676. check_irq_on();
  3677. spin_lock_irq(&n->list_lock);
  3678. list_for_each_entry(page, &n->slabs_full, lru)
  3679. handle_slab(x, cachep, page);
  3680. list_for_each_entry(page, &n->slabs_partial, lru)
  3681. handle_slab(x, cachep, page);
  3682. spin_unlock_irq(&n->list_lock);
  3683. }
  3684. } while (!is_store_user_clean(cachep));
  3685. name = cachep->name;
  3686. if (x[0] == x[1]) {
  3687. /* Increase the buffer size */
  3688. mutex_unlock(&slab_mutex);
  3689. m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
  3690. if (!m->private) {
  3691. /* Too bad, we are really out */
  3692. m->private = x;
  3693. mutex_lock(&slab_mutex);
  3694. return -ENOMEM;
  3695. }
  3696. *(unsigned long *)m->private = x[0] * 2;
  3697. kfree(x);
  3698. mutex_lock(&slab_mutex);
  3699. /* Now make sure this entry will be retried */
  3700. m->count = m->size;
  3701. return 0;
  3702. }
  3703. for (i = 0; i < x[1]; i++) {
  3704. seq_printf(m, "%s: %lu ", name, x[2*i+3]);
  3705. show_symbol(m, x[2*i+2]);
  3706. seq_putc(m, '\n');
  3707. }
  3708. return 0;
  3709. }
  3710. static const struct seq_operations slabstats_op = {
  3711. .start = slab_start,
  3712. .next = slab_next,
  3713. .stop = slab_stop,
  3714. .show = leaks_show,
  3715. };
  3716. static int slabstats_open(struct inode *inode, struct file *file)
  3717. {
  3718. unsigned long *n;
  3719. n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
  3720. if (!n)
  3721. return -ENOMEM;
  3722. *n = PAGE_SIZE / (2 * sizeof(unsigned long));
  3723. return 0;
  3724. }
  3725. static const struct file_operations proc_slabstats_operations = {
  3726. .open = slabstats_open,
  3727. .read = seq_read,
  3728. .llseek = seq_lseek,
  3729. .release = seq_release_private,
  3730. };
  3731. #endif
  3732. static int __init slab_proc_init(void)
  3733. {
  3734. #ifdef CONFIG_DEBUG_SLAB_LEAK
  3735. proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
  3736. #endif
  3737. return 0;
  3738. }
  3739. module_init(slab_proc_init);
  3740. #endif
  3741. #ifdef CONFIG_HARDENED_USERCOPY
  3742. /*
  3743. * Rejects objects that are incorrectly sized.
  3744. *
  3745. * Returns NULL if check passes, otherwise const char * to name of cache
  3746. * to indicate an error.
  3747. */
  3748. const char *__check_heap_object(const void *ptr, unsigned long n,
  3749. struct page *page)
  3750. {
  3751. struct kmem_cache *cachep;
  3752. unsigned int objnr;
  3753. unsigned long offset;
  3754. /* Find and validate object. */
  3755. cachep = page->slab_cache;
  3756. objnr = obj_to_index(cachep, page, (void *)ptr);
  3757. BUG_ON(objnr >= cachep->num);
  3758. /* Find offset within object. */
  3759. offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
  3760. /* Allow address range falling entirely within object size. */
  3761. if (offset <= cachep->object_size && n <= cachep->object_size - offset)
  3762. return NULL;
  3763. return cachep->name;
  3764. }
  3765. #endif /* CONFIG_HARDENED_USERCOPY */
  3766. /**
  3767. * ksize - get the actual amount of memory allocated for a given object
  3768. * @objp: Pointer to the object
  3769. *
  3770. * kmalloc may internally round up allocations and return more memory
  3771. * than requested. ksize() can be used to determine the actual amount of
  3772. * memory allocated. The caller may use this additional memory, even though
  3773. * a smaller amount of memory was initially specified with the kmalloc call.
  3774. * The caller must guarantee that objp points to a valid object previously
  3775. * allocated with either kmalloc() or kmem_cache_alloc(). The object
  3776. * must not be freed during the duration of the call.
  3777. */
  3778. size_t ksize(const void *objp)
  3779. {
  3780. size_t size;
  3781. BUG_ON(!objp);
  3782. if (unlikely(objp == ZERO_SIZE_PTR))
  3783. return 0;
  3784. size = virt_to_cache(objp)->object_size;
  3785. /* We assume that ksize callers could use the whole allocated area,
  3786. * so we need to unpoison this area.
  3787. */
  3788. kasan_unpoison_shadow(objp, size);
  3789. return size;
  3790. }
  3791. EXPORT_SYMBOL(ksize);
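/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * ksize() reports the rounded-up size actually reserved for an allocation,
 * which the caller may legitimately use in full, e.g. to avoid reallocating.
 */
static void __maybe_unused example_ksize_usage(void)
{
	char *buf = kmalloc(100, GFP_KERNEL);
	size_t usable;

	if (!buf)
		return;
	usable = ksize(buf);	/* at least 100; the whole area is usable */
	memset(buf, 0, usable);
	kfree(buf);
}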