dm-cache-target.c

/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache"

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
	"A percentage of time allocated for copying to and/or from cache");

/*----------------------------------------------------------------*/

#define IOT_RESOLUTION 4

struct io_tracker {
	spinlock_t lock;

	/*
	 * Sectors of in-flight IO.
	 */
	sector_t in_flight;

	/*
	 * The time, in jiffies, when this device became idle (if it is
	 * indeed idle).
	 */
	unsigned long idle_time;
	unsigned long last_update_time;
};

static void iot_init(struct io_tracker *iot)
{
	spin_lock_init(&iot->lock);
	iot->in_flight = 0ul;
	iot->idle_time = 0ul;
	iot->last_update_time = jiffies;
}

static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
	if (iot->in_flight)
		return false;

	return time_after(jiffies, iot->idle_time + jifs);
}

static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	r = __iot_idle_for(iot, jifs);
	spin_unlock_irqrestore(&iot->lock, flags);

	return r;
}

static void iot_io_begin(struct io_tracker *iot, sector_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	iot->in_flight += len;
	spin_unlock_irqrestore(&iot->lock, flags);
}

static void __iot_io_end(struct io_tracker *iot, sector_t len)
{
	iot->in_flight -= len;
	if (!iot->in_flight)
		iot->idle_time = jiffies;
}

static void iot_io_end(struct io_tracker *iot, sector_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	__iot_io_end(iot, len);
	spin_unlock_irqrestore(&iot->lock, flags);
}
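
/*
 * The locked iot_io_begin()/iot_io_end() pair brackets each accounted bio
 * to the origin device; idle_time is stamped whenever in_flight drops to
 * zero, so iot_idle_for() can report whether the origin has been quiet
 * for a given number of jiffies.
 */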

/*----------------------------------------------------------------*/

/*
 * Glossary:
 *
 * oblock: index of an origin block
 * cblock: index of a cache block
 * promotion: movement of a block from origin to cache
 * demotion: movement of a block from cache to origin
 * migration: movement of a block between the origin and cache device,
 *            either direction
 */

/*----------------------------------------------------------------*/

/*
 * There are a couple of places where we let a bio run, but want to do some
 * work before calling its endio function. We do this by temporarily
 * changing the endio fn.
 */
struct dm_hook_info {
	bio_end_io_t *bi_end_io;
};

static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
			bio_end_io_t *bi_end_io, void *bi_private)
{
	h->bi_end_io = bio->bi_end_io;

	bio->bi_end_io = bi_end_io;
	bio->bi_private = bi_private;
}

static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
	bio->bi_end_io = h->bi_end_io;
}

/*----------------------------------------------------------------*/

#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
#define MIGRATION_COUNT_WINDOW 10

/*
 * The block size of the device holding cache data must be
 * between 32KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

enum cache_metadata_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
	CM_FAIL
};

enum cache_io_mode {
	/*
	 * Data is written to cached blocks only. These blocks are marked
	 * dirty. If you lose the cache device you will lose data.
	 * Potential performance increase for both reads and writes.
	 */
	CM_IO_WRITEBACK,

	/*
	 * Data is written to both cache and origin. Blocks are never
	 * dirty. Potential performance benefit for reads only.
	 */
	CM_IO_WRITETHROUGH,

	/*
	 * A degraded mode useful for various cache coherency situations
	 * (eg, rolling back snapshots). Reads and writes always go to the
	 * origin. If a write goes to a cached oblock, then the cache
	 * block is invalidated.
	 */
	CM_IO_PASSTHROUGH
};

struct cache_features {
	enum cache_metadata_mode mode;
	enum cache_io_mode io_mode;
	unsigned metadata_version;
};

struct cache_stats {
	atomic_t read_hit;
	atomic_t read_miss;
	atomic_t write_hit;
	atomic_t write_miss;
	atomic_t demotion;
	atomic_t promotion;
	atomic_t copies_avoided;
	atomic_t cache_cell_clash;
	atomic_t commit_count;
	atomic_t discard_count;
};

/*
 * Defines a range of cblocks, begin to (end - 1) are in the range. end is
 * the one-past-the-end value.
 */
struct cblock_range {
	dm_cblock_t begin;
	dm_cblock_t end;
};

struct invalidation_request {
	struct list_head list;
	struct cblock_range *cblocks;

	atomic_t complete;
	int err;

	wait_queue_head_t result_wait;
};

struct cache {
	struct dm_target *ti;
	struct dm_target_callbacks callbacks;

	struct dm_cache_metadata *cmd;

	/*
	 * Metadata is written to this device.
	 */
	struct dm_dev *metadata_dev;

	/*
	 * The slower of the two data devices. Typically a spindle.
	 */
	struct dm_dev *origin_dev;

	/*
	 * The faster of the two data devices. Typically an SSD.
	 */
	struct dm_dev *cache_dev;

	/*
	 * Size of the origin device in _complete_ blocks and native sectors.
	 */
	dm_oblock_t origin_blocks;
	sector_t origin_sectors;

	/*
	 * Size of the cache device in blocks.
	 */
	dm_cblock_t cache_size;

	/*
	 * Fields for converting from sectors to blocks.
	 */
	sector_t sectors_per_block;
	int sectors_per_block_shift;

	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_writethrough_bios;
	struct list_head quiesced_migrations;
	struct list_head completed_migrations;
	struct list_head need_commit_migrations;
	sector_t migration_threshold;
	wait_queue_head_t migration_wait;
	atomic_t nr_allocated_migrations;

	/*
	 * The number of in flight migrations that are performing
	 * background io. eg, promotion, writeback.
	 */
	atomic_t nr_io_migrations;

	wait_queue_head_t quiescing_wait;
	atomic_t quiescing;
	atomic_t quiescing_ack;

	/*
	 * cache_size entries, dirty if set
	 */
	atomic_t nr_dirty;
	unsigned long *dirty_bitset;

	/*
	 * origin_blocks entries, discarded if set.
	 */
	dm_dblock_t discard_nr_blocks;
	unsigned long *discard_bitset;
	uint32_t discard_block_size; /* a power of 2 times sectors per block */

	/*
	 * Rather than reconstructing the table line for the status we just
	 * save it and regurgitate.
	 */
	unsigned nr_ctr_args;
	const char **ctr_args;

	struct dm_kcopyd_client *copier;
	struct workqueue_struct *wq;
	struct work_struct worker;

	struct delayed_work waker;
	unsigned long last_commit_jiffies;

	struct dm_bio_prison *prison;
	struct dm_deferred_set *all_io_ds;

	mempool_t *migration_pool;

	struct dm_cache_policy *policy;
	unsigned policy_nr_args;

	bool need_tick_bio:1;
	bool sized:1;
	bool invalidate:1;
	bool commit_requested:1;
	bool loaded_mappings:1;
	bool loaded_discards:1;

	/*
	 * Cache features such as write-through.
	 */
	struct cache_features features;

	struct cache_stats stats;

	/*
	 * Invalidation fields.
	 */
	spinlock_t invalidation_lock;
	struct list_head invalidation_requests;

	struct io_tracker origin_tracker;
};

struct per_bio_data {
	bool tick:1;
	unsigned req_nr:2;
	struct dm_deferred_entry *all_io_entry;
	struct dm_hook_info hook_info;
	sector_t len;

	/*
	 * writethrough fields. These MUST remain at the end of this
	 * structure and the 'cache' member must be the first as it
	 * is used to determine the offset of the writethrough fields.
	 */
	struct cache *cache;
	dm_cblock_t cblock;
	struct dm_bio_details bio_details;
};

struct dm_cache_migration {
	struct list_head list;
	struct cache *cache;

	unsigned long start_jiffies;
	dm_oblock_t old_oblock;
	dm_oblock_t new_oblock;
	dm_cblock_t cblock;

	bool err:1;
	bool discard:1;
	bool writeback:1;
	bool demote:1;
	bool promote:1;
	bool requeue_holder:1;
	bool invalidate:1;

	struct dm_bio_prison_cell *old_ocell;
	struct dm_bio_prison_cell *new_ocell;
};

/*
 * Processing a bio in the worker thread may require these memory
 * allocations. We prealloc to avoid deadlocks (the same worker thread
 * frees them back to the mempool).
 */
struct prealloc {
	struct dm_cache_migration *mg;
	struct dm_bio_prison_cell *cell1;
	struct dm_bio_prison_cell *cell2;
};

static enum cache_metadata_mode get_cache_mode(struct cache *cache);

static void wake_worker(struct cache *cache)
{
	queue_work(cache->wq, &cache->worker);
}

/*----------------------------------------------------------------*/

static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
{
	/* FIXME: change to use a local slab. */
	return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
}

static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
{
	dm_bio_prison_free_cell(cache->prison, cell);
}

static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
	struct dm_cache_migration *mg;

	mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
	if (mg) {
		mg->cache = cache;
		atomic_inc(&mg->cache->nr_allocated_migrations);
	}

	return mg;
}

static void free_migration(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	if (atomic_dec_and_test(&cache->nr_allocated_migrations))
		wake_up(&cache->migration_wait);

	mempool_free(mg, cache->migration_pool);
}

static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
{
	if (!p->mg) {
		p->mg = alloc_migration(cache);
		if (!p->mg)
			return -ENOMEM;
	}

	if (!p->cell1) {
		p->cell1 = alloc_prison_cell(cache);
		if (!p->cell1)
			return -ENOMEM;
	}

	if (!p->cell2) {
		p->cell2 = alloc_prison_cell(cache);
		if (!p->cell2)
			return -ENOMEM;
	}

	return 0;
}

static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
{
	if (p->cell2)
		free_prison_cell(cache, p->cell2);

	if (p->cell1)
		free_prison_cell(cache, p->cell1);

	if (p->mg)
		free_migration(p->mg);
}

static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
{
	struct dm_cache_migration *mg = p->mg;

	BUG_ON(!mg);
	p->mg = NULL;

	return mg;
}

/*
 * You must have a cell within the prealloc struct to return. If not this
 * function will BUG() rather than returning NULL.
 */
static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
{
	struct dm_bio_prison_cell *r = NULL;

	if (p->cell1) {
		r = p->cell1;
		p->cell1 = NULL;
	} else if (p->cell2) {
		r = p->cell2;
		p->cell2 = NULL;
	} else
		BUG();

	return r;
}

/*
 * You can't have more than two cells in a prealloc struct. BUG() will be
 * called if you try and overfill.
 */
static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
{
	if (!p->cell2)
		p->cell2 = cell;
	else if (!p->cell1)
		p->cell1 = cell;
	else
		BUG();
}

/*----------------------------------------------------------------*/

static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = 0;
	key->block_begin = from_oblock(begin);
	key->block_end = from_oblock(end);
}

/*
 * The caller hands in a preallocated cell, and a free function for it.
 * The cell will be freed if there's an error, or if it wasn't used because
 * a cell with that key already exists.
 */
typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);

static int bio_detain_range(struct cache *cache, dm_oblock_t oblock_begin, dm_oblock_t oblock_end,
			    struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
			    cell_free_fn free_fn, void *free_context,
			    struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;

	build_key(oblock_begin, oblock_end, &key);
	r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
	if (r)
		free_fn(free_context, cell_prealloc);

	return r;
}

static int bio_detain(struct cache *cache, dm_oblock_t oblock,
		      struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
		      cell_free_fn free_fn, void *free_context,
		      struct dm_bio_prison_cell **cell_result)
{
	dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
	return bio_detain_range(cache, oblock, end, bio,
				cell_prealloc, free_fn, free_context, cell_result);
}

static int get_cell(struct cache *cache,
		    dm_oblock_t oblock,
		    struct prealloc *structs,
		    struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;
	struct dm_bio_prison_cell *cell_prealloc;

	cell_prealloc = prealloc_get_cell(structs);

	build_key(oblock, to_oblock(from_oblock(oblock) + 1ULL), &key);
	r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
	if (r)
		prealloc_put_cell(structs, cell_prealloc);

	return r;
}

/*----------------------------------------------------------------*/

static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
	return test_bit(from_cblock(b), cache->dirty_bitset);
}

static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
		atomic_inc(&cache->nr_dirty);
		policy_set_dirty(cache->policy, oblock);
	}
}

static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
		policy_clear_dirty(cache->policy, oblock);
		if (atomic_dec_return(&cache->nr_dirty) == 0)
			dm_table_event(cache->ti->table);
	}
}

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
{
	return cache->sectors_per_block_shift >= 0;
}

/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
__always_inline
#endif
static dm_block_t block_div(dm_block_t b, uint32_t n)
{
	do_div(b, n);

	return b;
}

static dm_block_t oblocks_per_dblock(struct cache *cache)
{
	dm_block_t oblocks = cache->discard_block_size;

	if (block_size_is_power_of_two(cache))
		oblocks >>= cache->sectors_per_block_shift;
	else
		oblocks = block_div(oblocks, cache->sectors_per_block);

	return oblocks;
}

static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{
	return to_dblock(block_div(from_oblock(oblock),
				   oblocks_per_dblock(cache)));
}

static dm_oblock_t dblock_to_oblock(struct cache *cache, dm_dblock_t dblock)
{
	return to_oblock(from_dblock(dblock) * oblocks_per_dblock(cache));
}

static void set_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
	atomic_inc(&cache->stats.discard_count);

	spin_lock_irqsave(&cache->lock, flags);
	set_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void clear_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	clear_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
		     cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

/*----------------------------------------------------------------*/

static void load_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	dm_cache_metadata_get_stats(cache->cmd, &stats);
	atomic_set(&cache->stats.read_hit, stats.read_hits);
	atomic_set(&cache->stats.read_miss, stats.read_misses);
	atomic_set(&cache->stats.write_hit, stats.write_hits);
	atomic_set(&cache->stats.write_miss, stats.write_misses);
}

static void save_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return;

	stats.read_hits = atomic_read(&cache->stats.read_hit);
	stats.read_misses = atomic_read(&cache->stats.read_miss);
	stats.write_hits = atomic_read(&cache->stats.write_hit);
	stats.write_misses = atomic_read(&cache->stats.write_miss);

	dm_cache_metadata_set_stats(cache->cmd, &stats);
}

/*----------------------------------------------------------------
 * Per bio data
 *--------------------------------------------------------------*/

/*
 * If using writeback, leave out struct per_bio_data's writethrough fields.
 */
#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))

static bool writethrough_mode(struct cache_features *f)
{
	return f->io_mode == CM_IO_WRITETHROUGH;
}

static bool writeback_mode(struct cache_features *f)
{
	return f->io_mode == CM_IO_WRITEBACK;
}

static bool passthrough_mode(struct cache_features *f)
{
	return f->io_mode == CM_IO_PASSTHROUGH;
}

static size_t get_per_bio_data_size(struct cache *cache)
{
	return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}
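
/*
 * Only writethrough mode uses the trailing cache/cblock/bio_details
 * members of struct per_bio_data, so every other mode asks device-mapper
 * for the smaller per-bio area that stops at
 * offsetof(struct per_bio_data, cache).
 */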

static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
	BUG_ON(!pb);
	return pb;
}

static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = get_per_bio_data(bio, data_size);

	pb->tick = false;
	pb->req_nr = dm_bio_get_target_bio_nr(bio);
	pb->all_io_entry = NULL;
	pb->len = 0;

	return pb;
}

/*----------------------------------------------------------------
 * Remapping
 *--------------------------------------------------------------*/
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
	bio->bi_bdev = cache->origin_dev->bdev;
}

static void remap_to_cache(struct cache *cache, struct bio *bio,
			   dm_cblock_t cblock)
{
	sector_t bi_sector = bio->bi_iter.bi_sector;
	sector_t block = from_cblock(cblock);

	bio->bi_bdev = cache->cache_dev->bdev;
	if (!block_size_is_power_of_two(cache))
		bio->bi_iter.bi_sector =
			(block * cache->sectors_per_block) +
			sector_div(bi_sector, cache->sectors_per_block);
	else
		bio->bi_iter.bi_sector =
			(block << cache->sectors_per_block_shift) |
			(bi_sector & (cache->sectors_per_block - 1));
}
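
/*
 * The remapped sector is the start of the target cache block plus the
 * bio's offset within its block.  sector_div() yields that offset as its
 * remainder in the general case; when the block size is a power of two
 * the shift-and-mask path computes the same thing more cheaply.
 */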

static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	spin_lock_irqsave(&cache->lock, flags);
	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
	    bio_op(bio) != REQ_OP_DISCARD) {
		pb->tick = true;
		cache->need_tick_bio = false;
	}
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
					  dm_oblock_t oblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_origin(cache, bio);
	if (bio_data_dir(bio) == WRITE)
		clear_discard(cache, oblock_to_dblock(cache, oblock));
}

static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
				 dm_oblock_t oblock, dm_cblock_t cblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_cache(cache, bio, cblock);
	if (bio_data_dir(bio) == WRITE) {
		set_dirty(cache, oblock, cblock);
		clear_discard(cache, oblock_to_dblock(cache, oblock));
	}
}

static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (!block_size_is_power_of_two(cache))
		(void) sector_div(block_nr, cache->sectors_per_block);
	else
		block_nr >>= cache->sectors_per_block_shift;

	return to_oblock(block_nr);
}

/*
 * You must increment the deferred set whilst the prison cell is held. To
 * encourage this, we ask for 'cell' to be passed in.
 */
static void inc_ds(struct cache *cache, struct bio *bio,
		   struct dm_bio_prison_cell *cell)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	BUG_ON(!cell);
	BUG_ON(pb->all_io_entry);

	pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
}

static bool accountable_bio(struct cache *cache, struct bio *bio)
{
	return ((bio->bi_bdev == cache->origin_dev->bdev) &&
		bio_op(bio) != REQ_OP_DISCARD);
}

static void accounted_begin(struct cache *cache, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	if (accountable_bio(cache, bio)) {
		pb->len = bio_sectors(bio);
		iot_io_begin(&cache->origin_tracker, pb->len);
	}
}

static void accounted_complete(struct cache *cache, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	iot_io_end(&cache->origin_tracker, pb->len);
}

static void accounted_request(struct cache *cache, struct bio *bio)
{
	accounted_begin(cache, bio);
	generic_make_request(bio);
}

static void issue(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	if (!op_is_flush(bio->bi_opf)) {
		accounted_request(cache, bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in do_worker().
	 */
	spin_lock_irqsave(&cache->lock, flags);
	cache->commit_requested = true;
	bio_list_add(&cache->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell)
{
	inc_ds(cache, bio, cell);
	issue(cache, bio);
}

static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_writethrough_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void writethrough_endio(struct bio *bio)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	dm_unhook_bio(&pb->hook_info, bio);

	if (bio->bi_error) {
		bio_endio(bio);
		return;
	}

	dm_bio_restore(&pb->bio_details, bio);
	remap_to_cache(pb->cache, bio, pb->cblock);

	/*
	 * We can't issue this bio directly, since we're in interrupt
	 * context. So it gets put on a bio list for processing by the
	 * worker thread.
	 */
	defer_writethrough_bio(pb->cache, bio);
}

/*
 * When running in writethrough mode we need to send writes to clean blocks
 * to both the cache and origin devices. In future we'd like to clone the
 * bio and send them in parallel, but for now we're doing them in
 * series as this is easier.
 */
static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
				       dm_oblock_t oblock, dm_cblock_t cblock)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	pb->cache = cache;
	pb->cblock = cblock;
	dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
	dm_bio_record(&pb->bio_details, bio);

	remap_to_origin_clear_discard(pb->cache, bio, oblock);
}
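
/*
 * So a writethrough write runs in two legs: the bio is remapped to the
 * origin first, and when that IO succeeds writethrough_endio() restores
 * the recorded bio, remaps it to the cache device and hands it to the
 * worker thread to be reissued.
 */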

/*----------------------------------------------------------------
 * Failure modes
 *--------------------------------------------------------------*/
static enum cache_metadata_mode get_cache_mode(struct cache *cache)
{
	return cache->features.mode;
}

static const char *cache_device_name(struct cache *cache)
{
	return dm_device_name(dm_table_get_md(cache->ti->table));
}

static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
{
	const char *descs[] = {
		"write",
		"read-only",
		"fail"
	};

	dm_table_event(cache->ti->table);
	DMINFO("%s: switching cache to %s mode",
	       cache_device_name(cache), descs[(int)mode]);
}

static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
{
	bool needs_check;
	enum cache_metadata_mode old_mode = get_cache_mode(cache);

	if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
		DMERR("%s: unable to read needs_check flag, setting failure mode.",
		      cache_device_name(cache));
		new_mode = CM_FAIL;
	}

	if (new_mode == CM_WRITE && needs_check) {
		DMERR("%s: unable to switch cache to write mode until repaired.",
		      cache_device_name(cache));
		if (old_mode != new_mode)
			new_mode = old_mode;
		else
			new_mode = CM_READ_ONLY;
	}

	/* Never move out of fail mode */
	if (old_mode == CM_FAIL)
		new_mode = CM_FAIL;

	switch (new_mode) {
	case CM_FAIL:
	case CM_READ_ONLY:
		dm_cache_metadata_set_read_only(cache->cmd);
		break;

	case CM_WRITE:
		dm_cache_metadata_set_read_write(cache->cmd);
		break;
	}

	cache->features.mode = new_mode;

	if (new_mode != old_mode)
		notify_mode_switch(cache, new_mode);
}

static void abort_transaction(struct cache *cache)
{
	const char *dev_name = cache_device_name(cache);

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return;

	if (dm_cache_metadata_set_needs_check(cache->cmd)) {
		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
		set_cache_mode(cache, CM_FAIL);
	}

	DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
	if (dm_cache_metadata_abort(cache->cmd)) {
		DMERR("%s: failed to abort metadata transaction", dev_name);
		set_cache_mode(cache, CM_FAIL);
	}
}
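
/*
 * Every failed metadata operation is routed through
 * metadata_operation_failed(): the current transaction is aborted (setting
 * the 'needs_check' flag on the way down) and the cache falls back to
 * read-only mode.  Fail mode, once entered, is never left.
 */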
static void metadata_operation_failed(struct cache *cache, const char *op, int r)
{
	DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
		    cache_device_name(cache), op, r);
	abort_transaction(cache);
	set_cache_mode(cache, CM_READ_ONLY);
}

/*----------------------------------------------------------------
 * Migration processing
 *
 * Migration covers moving data from the origin device to the cache, or
 * vice versa.
 *--------------------------------------------------------------*/
static void inc_io_migrations(struct cache *cache)
{
	atomic_inc(&cache->nr_io_migrations);
}

static void dec_io_migrations(struct cache *cache)
{
	atomic_dec(&cache->nr_io_migrations);
}

static bool discard_or_flush(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
}

static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
{
	if (discard_or_flush(cell->holder)) {
		/*
		 * We have to handle these bios individually.
		 */
		dm_cell_release(cache->prison, cell, &cache->deferred_bios);
		free_prison_cell(cache, cell);
	} else
		list_add_tail(&cell->user_list, &cache->deferred_cells);
}

static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder)
{
	unsigned long flags;

	if (!holder && dm_cell_promote_or_release(cache->prison, cell)) {
		/*
		 * There was no prisoner to promote to holder, the
		 * cell has been released.
		 */
		free_prison_cell(cache, cell);
		return;
	}

	spin_lock_irqsave(&cache->lock, flags);
	__cell_defer(cache, cell);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err)
{
	dm_cell_error(cache->prison, cell, err);
	free_prison_cell(cache, cell);
}

static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(cache, cell, DM_ENDIO_REQUEUE);
}

static void free_io_migration(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	dec_io_migrations(cache);
	free_migration(mg);
	wake_worker(cache);
}

static void migration_failure(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;
	const char *dev_name = cache_device_name(cache);

	if (mg->writeback) {
		DMERR_LIMIT("%s: writeback failed; couldn't copy block", dev_name);
		set_dirty(cache, mg->old_oblock, mg->cblock);
		cell_defer(cache, mg->old_ocell, false);

	} else if (mg->demote) {
		DMERR_LIMIT("%s: demotion failed; couldn't copy block", dev_name);
		policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);

		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
		if (mg->promote)
			cell_defer(cache, mg->new_ocell, true);
	} else {
		DMERR_LIMIT("%s: promotion failed; couldn't copy block", dev_name);
		policy_remove_mapping(cache->policy, mg->new_oblock);
		cell_defer(cache, mg->new_ocell, true);
	}

	free_io_migration(mg);
}

static void migration_success_pre_commit(struct dm_cache_migration *mg)
{
	int r;
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		clear_dirty(cache, mg->old_oblock, mg->cblock);
		cell_defer(cache, mg->old_ocell, false);
		free_io_migration(mg);
		return;

	} else if (mg->demote) {
		r = dm_cache_remove_mapping(cache->cmd, mg->cblock);
		if (r) {
			DMERR_LIMIT("%s: demotion failed; couldn't update on disk metadata",
				    cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
			policy_force_mapping(cache->policy, mg->new_oblock,
					     mg->old_oblock);
			if (mg->promote)
				cell_defer(cache, mg->new_ocell, true);
			free_io_migration(mg);
			return;
		}
	} else {
		r = dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock);
		if (r) {
			DMERR_LIMIT("%s: promotion failed; couldn't update on disk metadata",
				    cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_insert_mapping", r);
			policy_remove_mapping(cache->policy, mg->new_oblock);
			free_io_migration(mg);
			return;
		}
	}

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->need_commit_migrations);
	cache->commit_requested = true;
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void migration_success_post_commit(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		DMWARN_LIMIT("%s: writeback unexpectedly triggered commit",
			     cache_device_name(cache));
		return;

	} else if (mg->demote) {
		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);

		if (mg->promote) {
			mg->demote = false;

			spin_lock_irqsave(&cache->lock, flags);
			list_add_tail(&mg->list, &cache->quiesced_migrations);
			spin_unlock_irqrestore(&cache->lock, flags);

		} else {
			if (mg->invalidate)
				policy_remove_mapping(cache->policy, mg->old_oblock);
			free_io_migration(mg);
		}

	} else {
		if (mg->requeue_holder) {
			clear_dirty(cache, mg->new_oblock, mg->cblock);
			cell_defer(cache, mg->new_ocell, true);
		} else {
			/*
			 * The block was promoted via an overwrite, so it's dirty.
			 */
			set_dirty(cache, mg->new_oblock, mg->cblock);
			bio_endio(mg->new_ocell->holder);
			cell_defer(cache, mg->new_ocell, false);
		}
		free_io_migration(mg);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
	struct cache *cache = mg->cache;

	if (read_err || write_err)
		mg->err = true;

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->completed_migrations);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void issue_copy(struct dm_cache_migration *mg)
{
	int r;
	struct dm_io_region o_region, c_region;
	struct cache *cache = mg->cache;
	sector_t cblock = from_cblock(mg->cblock);

	o_region.bdev = cache->origin_dev->bdev;
	o_region.count = cache->sectors_per_block;

	c_region.bdev = cache->cache_dev->bdev;
	c_region.sector = cblock * cache->sectors_per_block;
	c_region.count = cache->sectors_per_block;

	if (mg->writeback || mg->demote) {
		/* demote */
		o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
	} else {
		/* promote */
		o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
	}

	if (r < 0) {
		DMERR_LIMIT("%s: issuing migration failed", cache_device_name(cache));
		migration_failure(mg);
	}
}

static void overwrite_endio(struct bio *bio)
{
	struct dm_cache_migration *mg = bio->bi_private;
	struct cache *cache = mg->cache;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
	unsigned long flags;

	dm_unhook_bio(&pb->hook_info, bio);

	if (bio->bi_error)
		mg->err = true;

	mg->requeue_holder = false;

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->completed_migrations);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(mg->cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
	remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);

	/*
	 * No need to inc_ds() here, since the cell will be held for the
	 * duration of the io.
	 */
	accounted_request(mg->cache, bio);
}

static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}

static void avoid_copy(struct dm_cache_migration *mg)
{
	atomic_inc(&mg->cache->stats.copies_avoided);
	migration_success_pre_commit(mg);
}

static void calc_discard_block_range(struct cache *cache, struct bio *bio,
				     dm_dblock_t *b, dm_dblock_t *e)
{
	sector_t sb = bio->bi_iter.bi_sector;
	sector_t se = bio_end_sector(bio);

	*b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));

	if (se - sb < cache->discard_block_size)
		*e = *b;
	else
		*e = to_dblock(block_div(se, cache->discard_block_size));
}

static void issue_discard(struct dm_cache_migration *mg)
{
	dm_dblock_t b, e;
	struct bio *bio = mg->new_ocell->holder;
	struct cache *cache = mg->cache;

	calc_discard_block_range(cache, bio, &b, &e);
	while (b != e) {
		set_discard(cache, b);
		b = to_dblock(from_dblock(b) + 1);
	}

	bio_endio(bio);
	cell_defer(cache, mg->new_ocell, false);
	free_migration(mg);
	wake_worker(cache);
}

static void issue_copy_or_discard(struct dm_cache_migration *mg)
{
	bool avoid;
	struct cache *cache = mg->cache;

	if (mg->discard) {
		issue_discard(mg);
		return;
	}

	if (mg->writeback || mg->demote)
		avoid = !is_dirty(cache, mg->cblock) ||
			is_discarded_oblock(cache, mg->old_oblock);
	else {
		struct bio *bio = mg->new_ocell->holder;

		avoid = is_discarded_oblock(cache, mg->new_oblock);

		if (writeback_mode(&cache->features) &&
		    !avoid && bio_writes_complete_block(cache, bio)) {
			issue_overwrite(mg, bio);
			return;
		}
	}

	avoid ? avoid_copy(mg) : issue_copy(mg);
}
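
/*
 * Note the two short cuts above: the copy is skipped entirely when the
 * source block holds nothing worth moving (a clean block being written
 * back, or a discarded origin block), and a whole-block write in writeback
 * mode is turned into an overwrite of the cache block instead of a copy.
 */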
  1125. static void complete_migration(struct dm_cache_migration *mg)
  1126. {
  1127. if (mg->err)
  1128. migration_failure(mg);
  1129. else
  1130. migration_success_pre_commit(mg);
  1131. }
  1132. static void process_migrations(struct cache *cache, struct list_head *head,
  1133. void (*fn)(struct dm_cache_migration *))
  1134. {
  1135. unsigned long flags;
  1136. struct list_head list;
  1137. struct dm_cache_migration *mg, *tmp;
  1138. INIT_LIST_HEAD(&list);
  1139. spin_lock_irqsave(&cache->lock, flags);
  1140. list_splice_init(head, &list);
  1141. spin_unlock_irqrestore(&cache->lock, flags);
  1142. list_for_each_entry_safe(mg, tmp, &list, list)
  1143. fn(mg);
  1144. }
  1145. static void __queue_quiesced_migration(struct dm_cache_migration *mg)
  1146. {
  1147. list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
  1148. }
  1149. static void queue_quiesced_migration(struct dm_cache_migration *mg)
  1150. {
  1151. unsigned long flags;
  1152. struct cache *cache = mg->cache;
  1153. spin_lock_irqsave(&cache->lock, flags);
  1154. __queue_quiesced_migration(mg);
  1155. spin_unlock_irqrestore(&cache->lock, flags);
  1156. wake_worker(cache);
  1157. }
  1158. static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
  1159. {
  1160. unsigned long flags;
  1161. struct dm_cache_migration *mg, *tmp;
  1162. spin_lock_irqsave(&cache->lock, flags);
  1163. list_for_each_entry_safe(mg, tmp, work, list)
  1164. __queue_quiesced_migration(mg);
  1165. spin_unlock_irqrestore(&cache->lock, flags);
  1166. wake_worker(cache);
  1167. }
  1168. static void check_for_quiesced_migrations(struct cache *cache,
  1169. struct per_bio_data *pb)
  1170. {
  1171. struct list_head work;
  1172. if (!pb->all_io_entry)
  1173. return;
  1174. INIT_LIST_HEAD(&work);
  1175. dm_deferred_entry_dec(pb->all_io_entry, &work);
  1176. if (!list_empty(&work))
  1177. queue_quiesced_migrations(cache, &work);
  1178. }
  1179. static void quiesce_migration(struct dm_cache_migration *mg)
  1180. {
  1181. if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
  1182. queue_quiesced_migration(mg);
  1183. }
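/*
 * The helpers below each fill in a migration; only the flags and cells
 * differ:
 *
 *   promote()             - promote only, holds the new origin cell
 *   writeback()           - writeback only, holds the old origin cell
 *   demote_then_promote() - demote + promote, holds both cells
 *   invalidate()          - demote + invalidate, dirty data is discarded
 *   discard()             - discard only, not counted as an io migration
 */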
  1184. static void promote(struct cache *cache, struct prealloc *structs,
  1185. dm_oblock_t oblock, dm_cblock_t cblock,
  1186. struct dm_bio_prison_cell *cell)
  1187. {
  1188. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  1189. mg->err = false;
  1190. mg->discard = false;
  1191. mg->writeback = false;
  1192. mg->demote = false;
  1193. mg->promote = true;
  1194. mg->requeue_holder = true;
  1195. mg->invalidate = false;
  1196. mg->cache = cache;
  1197. mg->new_oblock = oblock;
  1198. mg->cblock = cblock;
  1199. mg->old_ocell = NULL;
  1200. mg->new_ocell = cell;
  1201. mg->start_jiffies = jiffies;
  1202. inc_io_migrations(cache);
  1203. quiesce_migration(mg);
  1204. }
  1205. static void writeback(struct cache *cache, struct prealloc *structs,
  1206. dm_oblock_t oblock, dm_cblock_t cblock,
  1207. struct dm_bio_prison_cell *cell)
  1208. {
  1209. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  1210. mg->err = false;
  1211. mg->discard = false;
  1212. mg->writeback = true;
  1213. mg->demote = false;
  1214. mg->promote = false;
  1215. mg->requeue_holder = true;
  1216. mg->invalidate = false;
  1217. mg->cache = cache;
  1218. mg->old_oblock = oblock;
  1219. mg->cblock = cblock;
  1220. mg->old_ocell = cell;
  1221. mg->new_ocell = NULL;
  1222. mg->start_jiffies = jiffies;
  1223. inc_io_migrations(cache);
  1224. quiesce_migration(mg);
  1225. }
  1226. static void demote_then_promote(struct cache *cache, struct prealloc *structs,
  1227. dm_oblock_t old_oblock, dm_oblock_t new_oblock,
  1228. dm_cblock_t cblock,
  1229. struct dm_bio_prison_cell *old_ocell,
  1230. struct dm_bio_prison_cell *new_ocell)
  1231. {
  1232. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  1233. mg->err = false;
  1234. mg->discard = false;
  1235. mg->writeback = false;
  1236. mg->demote = true;
  1237. mg->promote = true;
  1238. mg->requeue_holder = true;
  1239. mg->invalidate = false;
  1240. mg->cache = cache;
  1241. mg->old_oblock = old_oblock;
  1242. mg->new_oblock = new_oblock;
  1243. mg->cblock = cblock;
  1244. mg->old_ocell = old_ocell;
  1245. mg->new_ocell = new_ocell;
  1246. mg->start_jiffies = jiffies;
  1247. inc_io_migrations(cache);
  1248. quiesce_migration(mg);
  1249. }
  1250. /*
  1251. * Invalidate a cache entry. No writeback occurs; any changes in the cache
  1252. * block are thrown away.
  1253. */
  1254. static void invalidate(struct cache *cache, struct prealloc *structs,
  1255. dm_oblock_t oblock, dm_cblock_t cblock,
  1256. struct dm_bio_prison_cell *cell)
  1257. {
  1258. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  1259. mg->err = false;
  1260. mg->discard = false;
  1261. mg->writeback = false;
  1262. mg->demote = true;
  1263. mg->promote = false;
  1264. mg->requeue_holder = true;
  1265. mg->invalidate = true;
  1266. mg->cache = cache;
  1267. mg->old_oblock = oblock;
  1268. mg->cblock = cblock;
  1269. mg->old_ocell = cell;
  1270. mg->new_ocell = NULL;
  1271. mg->start_jiffies = jiffies;
  1272. inc_io_migrations(cache);
  1273. quiesce_migration(mg);
  1274. }
  1275. static void discard(struct cache *cache, struct prealloc *structs,
  1276. struct dm_bio_prison_cell *cell)
  1277. {
  1278. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  1279. mg->err = false;
  1280. mg->discard = true;
  1281. mg->writeback = false;
  1282. mg->demote = false;
  1283. mg->promote = false;
  1284. mg->requeue_holder = false;
  1285. mg->invalidate = false;
  1286. mg->cache = cache;
  1287. mg->old_ocell = NULL;
  1288. mg->new_ocell = cell;
  1289. mg->start_jiffies = jiffies;
  1290. quiesce_migration(mg);
  1291. }
  1292. /*----------------------------------------------------------------
  1293. * bio processing
  1294. *--------------------------------------------------------------*/
  1295. static void defer_bio(struct cache *cache, struct bio *bio)
  1296. {
  1297. unsigned long flags;
  1298. spin_lock_irqsave(&cache->lock, flags);
  1299. bio_list_add(&cache->deferred_bios, bio);
  1300. spin_unlock_irqrestore(&cache->lock, flags);
  1301. wake_worker(cache);
  1302. }
  1303. static void process_flush_bio(struct cache *cache, struct bio *bio)
  1304. {
  1305. size_t pb_data_size = get_per_bio_data_size(cache);
  1306. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  1307. BUG_ON(bio->bi_iter.bi_size);
  1308. if (!pb->req_nr)
  1309. remap_to_origin(cache, bio);
  1310. else
  1311. remap_to_cache(cache, bio, 0);
  1312. /*
  1313. * REQ_PREFLUSH is not directed at any particular block so we don't
  1314. * need to inc_ds(). REQ_FUA's are split into a write + REQ_PREFLUSH
  1315. * by dm-core.
  1316. */
  1317. issue(cache, bio);
  1318. }
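/*
 * A discard bio may cover partial discard blocks at either end; the range
 * is trimmed to whole blocks and, if nothing remains, the bio is completed
 * immediately.  Otherwise the range is detained in the bio prison and a
 * discard migration is queued.
 */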
  1319. static void process_discard_bio(struct cache *cache, struct prealloc *structs,
  1320. struct bio *bio)
  1321. {
  1322. int r;
  1323. dm_dblock_t b, e;
  1324. struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
  1325. calc_discard_block_range(cache, bio, &b, &e);
  1326. if (b == e) {
  1327. bio_endio(bio);
  1328. return;
  1329. }
  1330. cell_prealloc = prealloc_get_cell(structs);
  1331. r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prealloc,
  1332. (cell_free_fn) prealloc_put_cell,
  1333. structs, &new_ocell);
  1334. if (r > 0)
  1335. return;
  1336. discard(cache, structs, new_ocell);
  1337. }
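/*
 * Simple throttle: the volume of data the in-flight migrations would move
 * (plus the one about to start), measured in sectors, must stay below the
 * user configurable migration_threshold.
 */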
  1338. static bool spare_migration_bandwidth(struct cache *cache)
  1339. {
  1340. sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
  1341. cache->sectors_per_block;
  1342. return current_volume < cache->migration_threshold;
  1343. }
  1344. static void inc_hit_counter(struct cache *cache, struct bio *bio)
  1345. {
  1346. atomic_inc(bio_data_dir(bio) == READ ?
  1347. &cache->stats.read_hit : &cache->stats.write_hit);
  1348. }
  1349. static void inc_miss_counter(struct cache *cache, struct bio *bio)
  1350. {
  1351. atomic_inc(bio_data_dir(bio) == READ ?
  1352. &cache->stats.read_miss : &cache->stats.write_miss);
  1353. }
  1354. /*----------------------------------------------------------------*/
  1355. struct inc_detail {
  1356. struct cache *cache;
  1357. struct bio_list bios_for_issue;
  1358. struct bio_list unhandled_bios;
  1359. bool any_writes;
  1360. };
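/*
 * inc_fn() is the visitor passed to dm_cell_visit_release().  It takes a
 * deferred set entry for the holder and every plain bio in the cell (so
 * migrations wait for them), pushes discards and flushes onto
 * unhandled_bios for the worker to deal with, and records whether any
 * write was seen so the caller can update the dirty and discard bitsets.
 */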
  1361. static void inc_fn(void *context, struct dm_bio_prison_cell *cell)
  1362. {
  1363. struct bio *bio;
  1364. struct inc_detail *detail = context;
  1365. struct cache *cache = detail->cache;
  1366. inc_ds(cache, cell->holder, cell);
  1367. if (bio_data_dir(cell->holder) == WRITE)
  1368. detail->any_writes = true;
  1369. while ((bio = bio_list_pop(&cell->bios))) {
  1370. if (discard_or_flush(bio)) {
  1371. bio_list_add(&detail->unhandled_bios, bio);
  1372. continue;
  1373. }
  1374. if (bio_data_dir(bio) == WRITE)
  1375. detail->any_writes = true;
  1376. bio_list_add(&detail->bios_for_issue, bio);
  1377. inc_ds(cache, bio, cell);
  1378. }
  1379. }
  1380. // FIXME: refactor these two
  1381. static void remap_cell_to_origin_clear_discard(struct cache *cache,
  1382. struct dm_bio_prison_cell *cell,
  1383. dm_oblock_t oblock, bool issue_holder)
  1384. {
  1385. struct bio *bio;
  1386. unsigned long flags;
  1387. struct inc_detail detail;
  1388. detail.cache = cache;
  1389. bio_list_init(&detail.bios_for_issue);
  1390. bio_list_init(&detail.unhandled_bios);
  1391. detail.any_writes = false;
  1392. spin_lock_irqsave(&cache->lock, flags);
  1393. dm_cell_visit_release(cache->prison, inc_fn, &detail, cell);
  1394. bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios);
  1395. spin_unlock_irqrestore(&cache->lock, flags);
  1396. remap_to_origin(cache, cell->holder);
  1397. if (issue_holder)
  1398. issue(cache, cell->holder);
  1399. else
  1400. accounted_begin(cache, cell->holder);
  1401. if (detail.any_writes)
  1402. clear_discard(cache, oblock_to_dblock(cache, oblock));
  1403. while ((bio = bio_list_pop(&detail.bios_for_issue))) {
  1404. remap_to_origin(cache, bio);
  1405. issue(cache, bio);
  1406. }
  1407. free_prison_cell(cache, cell);
  1408. }
  1409. static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell,
  1410. dm_oblock_t oblock, dm_cblock_t cblock, bool issue_holder)
  1411. {
  1412. struct bio *bio;
  1413. unsigned long flags;
  1414. struct inc_detail detail;
  1415. detail.cache = cache;
  1416. bio_list_init(&detail.bios_for_issue);
  1417. bio_list_init(&detail.unhandled_bios);
  1418. detail.any_writes = false;
  1419. spin_lock_irqsave(&cache->lock, flags);
  1420. dm_cell_visit_release(cache->prison, inc_fn, &detail, cell);
  1421. bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios);
  1422. spin_unlock_irqrestore(&cache->lock, flags);
  1423. remap_to_cache(cache, cell->holder, cblock);
  1424. if (issue_holder)
  1425. issue(cache, cell->holder);
  1426. else
  1427. accounted_begin(cache, cell->holder);
  1428. if (detail.any_writes) {
  1429. set_dirty(cache, oblock, cblock);
  1430. clear_discard(cache, oblock_to_dblock(cache, oblock));
  1431. }
  1432. while ((bio = bio_list_pop(&detail.bios_for_issue))) {
  1433. remap_to_cache(cache, bio, cblock);
  1434. issue(cache, bio);
  1435. }
  1436. free_prison_cell(cache, cell);
  1437. }
  1438. /*----------------------------------------------------------------*/
  1439. struct old_oblock_lock {
  1440. struct policy_locker locker;
  1441. struct cache *cache;
  1442. struct prealloc *structs;
  1443. struct dm_bio_prison_cell *cell;
  1444. };
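/*
 * When policy_map() returns POLICY_REPLACE it must first lock the origin
 * block it wants to demote.  cell_locker() does this with a preallocated
 * cell, so the slow path can hand the detained cell straight to
 * demote_then_promote().  The fast path (cache_map) never permits
 * migration, so it installs null_locker(), which must never be called.
 */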
  1445. static int null_locker(struct policy_locker *locker, dm_oblock_t b)
  1446. {
  1447. /* This should never be called */
  1448. BUG();
  1449. return 0;
  1450. }
  1451. static int cell_locker(struct policy_locker *locker, dm_oblock_t b)
  1452. {
  1453. struct old_oblock_lock *l = container_of(locker, struct old_oblock_lock, locker);
  1454. struct dm_bio_prison_cell *cell_prealloc = prealloc_get_cell(l->structs);
  1455. return bio_detain(l->cache, b, NULL, cell_prealloc,
  1456. (cell_free_fn) prealloc_put_cell,
  1457. l->structs, &l->cell);
  1458. }
  1459. static void process_cell(struct cache *cache, struct prealloc *structs,
  1460. struct dm_bio_prison_cell *new_ocell)
  1461. {
  1462. int r;
  1463. bool release_cell = true;
  1464. struct bio *bio = new_ocell->holder;
  1465. dm_oblock_t block = get_bio_block(cache, bio);
  1466. struct policy_result lookup_result;
  1467. bool passthrough = passthrough_mode(&cache->features);
  1468. bool fast_promotion, can_migrate;
  1469. struct old_oblock_lock ool;
  1470. fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);
  1471. can_migrate = !passthrough && (fast_promotion || spare_migration_bandwidth(cache));
  1472. ool.locker.fn = cell_locker;
  1473. ool.cache = cache;
  1474. ool.structs = structs;
  1475. ool.cell = NULL;
  1476. r = policy_map(cache->policy, block, true, can_migrate, fast_promotion,
  1477. bio, &ool.locker, &lookup_result);
  1478. if (r == -EWOULDBLOCK)
  1479. /* migration has been denied */
  1480. lookup_result.op = POLICY_MISS;
  1481. switch (lookup_result.op) {
  1482. case POLICY_HIT:
  1483. if (passthrough) {
  1484. inc_miss_counter(cache, bio);
  1485. /*
  1486. * Passthrough always maps to the origin,
  1487. * invalidating any cache blocks that are written
  1488. * to.
  1489. */
  1490. if (bio_data_dir(bio) == WRITE) {
  1491. atomic_inc(&cache->stats.demotion);
  1492. invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
  1493. release_cell = false;
  1494. } else {
  1495. /* FIXME: factor out issue_origin() */
  1496. remap_to_origin_clear_discard(cache, bio, block);
  1497. inc_and_issue(cache, bio, new_ocell);
  1498. }
  1499. } else {
  1500. inc_hit_counter(cache, bio);
  1501. if (bio_data_dir(bio) == WRITE &&
  1502. writethrough_mode(&cache->features) &&
  1503. !is_dirty(cache, lookup_result.cblock)) {
  1504. remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
  1505. inc_and_issue(cache, bio, new_ocell);
  1506. } else {
  1507. remap_cell_to_cache_dirty(cache, new_ocell, block, lookup_result.cblock, true);
  1508. release_cell = false;
  1509. }
  1510. }
  1511. break;
  1512. case POLICY_MISS:
  1513. inc_miss_counter(cache, bio);
  1514. remap_cell_to_origin_clear_discard(cache, new_ocell, block, true);
  1515. release_cell = false;
  1516. break;
  1517. case POLICY_NEW:
  1518. atomic_inc(&cache->stats.promotion);
  1519. promote(cache, structs, block, lookup_result.cblock, new_ocell);
  1520. release_cell = false;
  1521. break;
  1522. case POLICY_REPLACE:
  1523. atomic_inc(&cache->stats.demotion);
  1524. atomic_inc(&cache->stats.promotion);
  1525. demote_then_promote(cache, structs, lookup_result.old_oblock,
  1526. block, lookup_result.cblock,
  1527. ool.cell, new_ocell);
  1528. release_cell = false;
  1529. break;
  1530. default:
  1531. DMERR_LIMIT("%s: %s: erroring bio, unknown policy op: %u",
  1532. cache_device_name(cache), __func__,
  1533. (unsigned) lookup_result.op);
  1534. bio_io_error(bio);
  1535. }
  1536. if (release_cell)
  1537. cell_defer(cache, new_ocell, false);
  1538. }
  1539. static void process_bio(struct cache *cache, struct prealloc *structs,
  1540. struct bio *bio)
  1541. {
  1542. int r;
  1543. dm_oblock_t block = get_bio_block(cache, bio);
  1544. struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
  1545. /*
  1546. * Check to see if that block is currently migrating.
  1547. */
  1548. cell_prealloc = prealloc_get_cell(structs);
  1549. r = bio_detain(cache, block, bio, cell_prealloc,
  1550. (cell_free_fn) prealloc_put_cell,
  1551. structs, &new_ocell);
  1552. if (r > 0)
  1553. return;
  1554. process_cell(cache, structs, new_ocell);
  1555. }
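/*
 * Commit if more than COMMIT_PERIOD has passed since the last commit, or
 * if jiffies appears to have gone backwards (e.g. after a wrap), in which
 * case last_commit_jiffies can no longer be trusted.
 */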
  1556. static int need_commit_due_to_time(struct cache *cache)
  1557. {
  1558. return jiffies < cache->last_commit_jiffies ||
  1559. jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
  1560. }
  1561. /*
  1562. * A non-zero return indicates read_only or fail_io mode.
  1563. */
  1564. static int commit(struct cache *cache, bool clean_shutdown)
  1565. {
  1566. int r;
  1567. if (get_cache_mode(cache) >= CM_READ_ONLY)
  1568. return -EINVAL;
  1569. atomic_inc(&cache->stats.commit_count);
  1570. r = dm_cache_commit(cache->cmd, clean_shutdown);
  1571. if (r)
  1572. metadata_operation_failed(cache, "dm_cache_commit", r);
  1573. return r;
  1574. }
  1575. static int commit_if_needed(struct cache *cache)
  1576. {
  1577. int r = 0;
  1578. if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
  1579. dm_cache_changed_this_transaction(cache->cmd)) {
  1580. r = commit(cache, false);
  1581. cache->commit_requested = false;
  1582. cache->last_commit_jiffies = jiffies;
  1583. }
  1584. return r;
  1585. }
  1586. static void process_deferred_bios(struct cache *cache)
  1587. {
  1588. bool prealloc_used = false;
  1589. unsigned long flags;
  1590. struct bio_list bios;
  1591. struct bio *bio;
  1592. struct prealloc structs;
  1593. memset(&structs, 0, sizeof(structs));
  1594. bio_list_init(&bios);
  1595. spin_lock_irqsave(&cache->lock, flags);
  1596. bio_list_merge(&bios, &cache->deferred_bios);
  1597. bio_list_init(&cache->deferred_bios);
  1598. spin_unlock_irqrestore(&cache->lock, flags);
  1599. while (!bio_list_empty(&bios)) {
  1600. /*
  1601. * If we've got no free migration structs, and processing
  1602. * this bio might require one, we pause until there are some
  1603. * prepared mappings to process.
  1604. */
  1605. prealloc_used = true;
  1606. if (prealloc_data_structs(cache, &structs)) {
  1607. spin_lock_irqsave(&cache->lock, flags);
  1608. bio_list_merge(&cache->deferred_bios, &bios);
  1609. spin_unlock_irqrestore(&cache->lock, flags);
  1610. break;
  1611. }
  1612. bio = bio_list_pop(&bios);
  1613. if (bio->bi_opf & REQ_PREFLUSH)
  1614. process_flush_bio(cache, bio);
  1615. else if (bio_op(bio) == REQ_OP_DISCARD)
  1616. process_discard_bio(cache, &structs, bio);
  1617. else
  1618. process_bio(cache, &structs, bio);
  1619. }
  1620. if (prealloc_used)
  1621. prealloc_free_structs(cache, &structs);
  1622. }
  1623. static void process_deferred_cells(struct cache *cache)
  1624. {
  1625. bool prealloc_used = false;
  1626. unsigned long flags;
  1627. struct dm_bio_prison_cell *cell, *tmp;
  1628. struct list_head cells;
  1629. struct prealloc structs;
  1630. memset(&structs, 0, sizeof(structs));
  1631. INIT_LIST_HEAD(&cells);
  1632. spin_lock_irqsave(&cache->lock, flags);
  1633. list_splice_init(&cache->deferred_cells, &cells);
  1634. spin_unlock_irqrestore(&cache->lock, flags);
  1635. list_for_each_entry_safe(cell, tmp, &cells, user_list) {
  1636. /*
  1637. * If we've got no free migration structs, and processing
1638. * this cell might require one, we pause until there are some
  1639. * prepared mappings to process.
  1640. */
  1641. prealloc_used = true;
  1642. if (prealloc_data_structs(cache, &structs)) {
  1643. spin_lock_irqsave(&cache->lock, flags);
  1644. list_splice(&cells, &cache->deferred_cells);
  1645. spin_unlock_irqrestore(&cache->lock, flags);
  1646. break;
  1647. }
  1648. process_cell(cache, &structs, cell);
  1649. }
  1650. if (prealloc_used)
  1651. prealloc_free_structs(cache, &structs);
  1652. }
  1653. static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
  1654. {
  1655. unsigned long flags;
  1656. struct bio_list bios;
  1657. struct bio *bio;
  1658. bio_list_init(&bios);
  1659. spin_lock_irqsave(&cache->lock, flags);
  1660. bio_list_merge(&bios, &cache->deferred_flush_bios);
  1661. bio_list_init(&cache->deferred_flush_bios);
  1662. spin_unlock_irqrestore(&cache->lock, flags);
  1663. /*
  1664. * These bios have already been through inc_ds()
  1665. */
  1666. while ((bio = bio_list_pop(&bios)))
  1667. submit_bios ? accounted_request(cache, bio) : bio_io_error(bio);
  1668. }
  1669. static void process_deferred_writethrough_bios(struct cache *cache)
  1670. {
  1671. unsigned long flags;
  1672. struct bio_list bios;
  1673. struct bio *bio;
  1674. bio_list_init(&bios);
  1675. spin_lock_irqsave(&cache->lock, flags);
  1676. bio_list_merge(&bios, &cache->deferred_writethrough_bios);
  1677. bio_list_init(&cache->deferred_writethrough_bios);
  1678. spin_unlock_irqrestore(&cache->lock, flags);
  1679. /*
  1680. * These bios have already been through inc_ds()
  1681. */
  1682. while ((bio = bio_list_pop(&bios)))
  1683. accounted_request(cache, bio);
  1684. }
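/*
 * Write back dirty blocks while there is spare migration bandwidth.  The
 * policy is told whether the origin has seen io within the last second
 * (iot_idle_for(..., HZ)) so it can decide how aggressively to hand out
 * work.  If resources for a block can't be obtained the policy is asked
 * to mark it dirty again and we stop for now.
 */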
  1685. static void writeback_some_dirty_blocks(struct cache *cache)
  1686. {
  1687. bool prealloc_used = false;
  1688. dm_oblock_t oblock;
  1689. dm_cblock_t cblock;
  1690. struct prealloc structs;
  1691. struct dm_bio_prison_cell *old_ocell;
  1692. bool busy = !iot_idle_for(&cache->origin_tracker, HZ);
  1693. memset(&structs, 0, sizeof(structs));
  1694. while (spare_migration_bandwidth(cache)) {
  1695. if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
  1696. break; /* no work to do */
  1697. prealloc_used = true;
  1698. if (prealloc_data_structs(cache, &structs) ||
  1699. get_cell(cache, oblock, &structs, &old_ocell)) {
  1700. policy_set_dirty(cache->policy, oblock);
  1701. break;
  1702. }
  1703. writeback(cache, &structs, oblock, cblock, old_ocell);
  1704. }
  1705. if (prealloc_used)
  1706. prealloc_free_structs(cache, &structs);
  1707. }
  1708. /*----------------------------------------------------------------
  1709. * Invalidations.
  1710. * Dropping something from the cache *without* writing back.
  1711. *--------------------------------------------------------------*/
  1712. static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
  1713. {
  1714. int r = 0;
  1715. uint64_t begin = from_cblock(req->cblocks->begin);
  1716. uint64_t end = from_cblock(req->cblocks->end);
  1717. while (begin != end) {
  1718. r = policy_remove_cblock(cache->policy, to_cblock(begin));
  1719. if (!r) {
  1720. r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
  1721. if (r) {
  1722. metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
  1723. break;
  1724. }
  1725. } else if (r == -ENODATA) {
  1726. /* harmless, already unmapped */
  1727. r = 0;
  1728. } else {
  1729. DMERR("%s: policy_remove_cblock failed", cache_device_name(cache));
  1730. break;
  1731. }
  1732. begin++;
  1733. }
  1734. cache->commit_requested = true;
  1735. req->err = r;
  1736. atomic_set(&req->complete, 1);
  1737. wake_up(&req->result_wait);
  1738. }
  1739. static void process_invalidation_requests(struct cache *cache)
  1740. {
  1741. struct list_head list;
  1742. struct invalidation_request *req, *tmp;
  1743. INIT_LIST_HEAD(&list);
  1744. spin_lock(&cache->invalidation_lock);
  1745. list_splice_init(&cache->invalidation_requests, &list);
  1746. spin_unlock(&cache->invalidation_lock);
  1747. list_for_each_entry_safe (req, tmp, &list, list)
  1748. process_invalidation_request(cache, req);
  1749. }
  1750. /*----------------------------------------------------------------
  1751. * Main worker loop
  1752. *--------------------------------------------------------------*/
  1753. static bool is_quiescing(struct cache *cache)
  1754. {
  1755. return atomic_read(&cache->quiescing);
  1756. }
  1757. static void ack_quiescing(struct cache *cache)
  1758. {
  1759. if (is_quiescing(cache)) {
  1760. atomic_inc(&cache->quiescing_ack);
  1761. wake_up(&cache->quiescing_wait);
  1762. }
  1763. }
  1764. static void wait_for_quiescing_ack(struct cache *cache)
  1765. {
  1766. wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
  1767. }
  1768. static void start_quiescing(struct cache *cache)
  1769. {
  1770. atomic_inc(&cache->quiescing);
  1771. wait_for_quiescing_ack(cache);
  1772. }
  1773. static void stop_quiescing(struct cache *cache)
  1774. {
  1775. atomic_set(&cache->quiescing, 0);
  1776. atomic_set(&cache->quiescing_ack, 0);
  1777. }
  1778. static void wait_for_migrations(struct cache *cache)
  1779. {
  1780. wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
  1781. }
  1782. static void stop_worker(struct cache *cache)
  1783. {
  1784. cancel_delayed_work(&cache->waker);
  1785. flush_workqueue(cache->wq);
  1786. }
  1787. static void requeue_deferred_cells(struct cache *cache)
  1788. {
  1789. unsigned long flags;
  1790. struct list_head cells;
  1791. struct dm_bio_prison_cell *cell, *tmp;
  1792. INIT_LIST_HEAD(&cells);
  1793. spin_lock_irqsave(&cache->lock, flags);
  1794. list_splice_init(&cache->deferred_cells, &cells);
  1795. spin_unlock_irqrestore(&cache->lock, flags);
  1796. list_for_each_entry_safe(cell, tmp, &cells, user_list)
  1797. cell_requeue(cache, cell);
  1798. }
  1799. static void requeue_deferred_bios(struct cache *cache)
  1800. {
  1801. struct bio *bio;
  1802. struct bio_list bios;
  1803. bio_list_init(&bios);
  1804. bio_list_merge(&bios, &cache->deferred_bios);
  1805. bio_list_init(&cache->deferred_bios);
  1806. while ((bio = bio_list_pop(&bios))) {
  1807. bio->bi_error = DM_ENDIO_REQUEUE;
  1808. bio_endio(bio);
  1809. }
  1810. }
  1811. static int more_work(struct cache *cache)
  1812. {
  1813. if (is_quiescing(cache))
  1814. return !list_empty(&cache->quiesced_migrations) ||
  1815. !list_empty(&cache->completed_migrations) ||
  1816. !list_empty(&cache->need_commit_migrations);
  1817. else
  1818. return !bio_list_empty(&cache->deferred_bios) ||
  1819. !list_empty(&cache->deferred_cells) ||
  1820. !bio_list_empty(&cache->deferred_flush_bios) ||
  1821. !bio_list_empty(&cache->deferred_writethrough_bios) ||
  1822. !list_empty(&cache->quiesced_migrations) ||
  1823. !list_empty(&cache->completed_migrations) ||
  1824. !list_empty(&cache->need_commit_migrations) ||
  1825. cache->invalidate;
  1826. }
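/*
 * The worker drains everything in a fixed order: dirty-block writeback and
 * the deferred bio/cell/invalidation queues are only processed when not
 * quiescing; quiesced migrations are then issued and completed ones
 * finished off; finally a metadata commit is attempted - on failure the
 * pending flush bios are errored and migrations waiting on the commit are
 * failed, otherwise both are allowed to proceed.
 */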
  1827. static void do_worker(struct work_struct *ws)
  1828. {
  1829. struct cache *cache = container_of(ws, struct cache, worker);
  1830. do {
  1831. if (!is_quiescing(cache)) {
  1832. writeback_some_dirty_blocks(cache);
  1833. process_deferred_writethrough_bios(cache);
  1834. process_deferred_bios(cache);
  1835. process_deferred_cells(cache);
  1836. process_invalidation_requests(cache);
  1837. }
  1838. process_migrations(cache, &cache->quiesced_migrations, issue_copy_or_discard);
  1839. process_migrations(cache, &cache->completed_migrations, complete_migration);
  1840. if (commit_if_needed(cache)) {
  1841. process_deferred_flush_bios(cache, false);
  1842. process_migrations(cache, &cache->need_commit_migrations, migration_failure);
  1843. } else {
  1844. process_deferred_flush_bios(cache, true);
  1845. process_migrations(cache, &cache->need_commit_migrations,
  1846. migration_success_post_commit);
  1847. }
  1848. ack_quiescing(cache);
  1849. } while (more_work(cache));
  1850. }
  1851. /*
  1852. * We want to commit periodically so that not too much
  1853. * unwritten metadata builds up.
  1854. */
  1855. static void do_waker(struct work_struct *ws)
  1856. {
  1857. struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
  1858. policy_tick(cache->policy, true);
  1859. wake_worker(cache);
  1860. queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
  1861. }
  1862. /*----------------------------------------------------------------*/
  1863. static int is_congested(struct dm_dev *dev, int bdi_bits)
  1864. {
  1865. struct request_queue *q = bdev_get_queue(dev->bdev);
  1866. return bdi_congested(q->backing_dev_info, bdi_bits);
  1867. }
  1868. static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
  1869. {
  1870. struct cache *cache = container_of(cb, struct cache, callbacks);
  1871. return is_congested(cache->origin_dev, bdi_bits) ||
  1872. is_congested(cache->cache_dev, bdi_bits);
  1873. }
  1874. /*----------------------------------------------------------------
  1875. * Target methods
  1876. *--------------------------------------------------------------*/
  1877. /*
  1878. * This function gets called on the error paths of the constructor, so we
  1879. * have to cope with a partially initialised struct.
  1880. */
  1881. static void destroy(struct cache *cache)
  1882. {
  1883. unsigned i;
  1884. mempool_destroy(cache->migration_pool);
  1885. if (cache->all_io_ds)
  1886. dm_deferred_set_destroy(cache->all_io_ds);
  1887. if (cache->prison)
  1888. dm_bio_prison_destroy(cache->prison);
  1889. if (cache->wq)
  1890. destroy_workqueue(cache->wq);
  1891. if (cache->dirty_bitset)
  1892. free_bitset(cache->dirty_bitset);
  1893. if (cache->discard_bitset)
  1894. free_bitset(cache->discard_bitset);
  1895. if (cache->copier)
  1896. dm_kcopyd_client_destroy(cache->copier);
  1897. if (cache->cmd)
  1898. dm_cache_metadata_close(cache->cmd);
  1899. if (cache->metadata_dev)
  1900. dm_put_device(cache->ti, cache->metadata_dev);
  1901. if (cache->origin_dev)
  1902. dm_put_device(cache->ti, cache->origin_dev);
  1903. if (cache->cache_dev)
  1904. dm_put_device(cache->ti, cache->cache_dev);
  1905. if (cache->policy)
  1906. dm_cache_policy_destroy(cache->policy);
  1907. for (i = 0; i < cache->nr_ctr_args ; i++)
  1908. kfree(cache->ctr_args[i]);
  1909. kfree(cache->ctr_args);
  1910. kfree(cache);
  1911. }
  1912. static void cache_dtr(struct dm_target *ti)
  1913. {
  1914. struct cache *cache = ti->private;
  1915. destroy(cache);
  1916. }
  1917. static sector_t get_dev_size(struct dm_dev *dev)
  1918. {
  1919. return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
  1920. }
  1921. /*----------------------------------------------------------------*/
  1922. /*
  1923. * Construct a cache device mapping.
  1924. *
  1925. * cache <metadata dev> <cache dev> <origin dev> <block size>
  1926. * <#feature args> [<feature arg>]*
  1927. * <policy> <#policy args> [<policy arg>]*
  1928. *
  1929. * metadata dev : fast device holding the persistent metadata
  1930. * cache dev : fast device holding cached data blocks
  1931. * origin dev : slow device holding original data blocks
  1932. * block size : cache unit size in sectors
  1933. *
  1934. * #feature args : number of feature arguments passed
  1935. * feature args : writethrough. (The default is writeback.)
  1936. *
  1937. * policy : the replacement policy to use
  1938. * #policy args : an even number of policy arguments corresponding
  1939. * to key/value pairs passed to the policy
  1940. * policy args : key/value pairs passed to the policy
  1941. * E.g. 'sequential_threshold 1024'
  1942. * See cache-policies.txt for details.
  1943. *
  1944. * Optional feature arguments are:
  1945. * writethrough : write through caching that prohibits cache block
  1946. * content from being different from origin block content.
  1947. * Without this argument, the default behaviour is to write
  1948. * back cache block contents later for performance reasons,
  1949. * so they may differ from the corresponding origin blocks.
  1950. */
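/*
 * An illustrative table line (the device names are made up):
 *
 *   0 419430400 cache /dev/mapper/fast-meta /dev/mapper/fast-data /dev/slow 512 1 writethrough default 0
 *
 * i.e. a 200GiB origin, 512 sector (256KB) cache blocks, writethrough
 * enabled and the 'default' policy with no policy arguments.
 */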
  1951. struct cache_args {
  1952. struct dm_target *ti;
  1953. struct dm_dev *metadata_dev;
  1954. struct dm_dev *cache_dev;
  1955. sector_t cache_sectors;
  1956. struct dm_dev *origin_dev;
  1957. sector_t origin_sectors;
  1958. uint32_t block_size;
  1959. const char *policy_name;
  1960. int policy_argc;
  1961. const char **policy_argv;
  1962. struct cache_features features;
  1963. };
  1964. static void destroy_cache_args(struct cache_args *ca)
  1965. {
  1966. if (ca->metadata_dev)
  1967. dm_put_device(ca->ti, ca->metadata_dev);
  1968. if (ca->cache_dev)
  1969. dm_put_device(ca->ti, ca->cache_dev);
  1970. if (ca->origin_dev)
  1971. dm_put_device(ca->ti, ca->origin_dev);
  1972. kfree(ca);
  1973. }
  1974. static bool at_least_one_arg(struct dm_arg_set *as, char **error)
  1975. {
  1976. if (!as->argc) {
  1977. *error = "Insufficient args";
  1978. return false;
  1979. }
  1980. return true;
  1981. }
  1982. static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
  1983. char **error)
  1984. {
  1985. int r;
  1986. sector_t metadata_dev_size;
  1987. char b[BDEVNAME_SIZE];
  1988. if (!at_least_one_arg(as, error))
  1989. return -EINVAL;
  1990. r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
  1991. &ca->metadata_dev);
  1992. if (r) {
  1993. *error = "Error opening metadata device";
  1994. return r;
  1995. }
  1996. metadata_dev_size = get_dev_size(ca->metadata_dev);
  1997. if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
  1998. DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1999. bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS);
  2000. return 0;
  2001. }
  2002. static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
  2003. char **error)
  2004. {
  2005. int r;
  2006. if (!at_least_one_arg(as, error))
  2007. return -EINVAL;
  2008. r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
  2009. &ca->cache_dev);
  2010. if (r) {
  2011. *error = "Error opening cache device";
  2012. return r;
  2013. }
  2014. ca->cache_sectors = get_dev_size(ca->cache_dev);
  2015. return 0;
  2016. }
  2017. static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
  2018. char **error)
  2019. {
  2020. int r;
  2021. if (!at_least_one_arg(as, error))
  2022. return -EINVAL;
  2023. r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
  2024. &ca->origin_dev);
  2025. if (r) {
  2026. *error = "Error opening origin device";
  2027. return r;
  2028. }
  2029. ca->origin_sectors = get_dev_size(ca->origin_dev);
  2030. if (ca->ti->len > ca->origin_sectors) {
  2031. *error = "Device size larger than cached device";
  2032. return -EINVAL;
  2033. }
  2034. return 0;
  2035. }
  2036. static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
  2037. char **error)
  2038. {
  2039. unsigned long block_size;
  2040. if (!at_least_one_arg(as, error))
  2041. return -EINVAL;
  2042. if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
  2043. block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
  2044. block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
  2045. block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
  2046. *error = "Invalid data block size";
  2047. return -EINVAL;
  2048. }
  2049. if (block_size > ca->cache_sectors) {
  2050. *error = "Data block size is larger than the cache device";
  2051. return -EINVAL;
  2052. }
  2053. ca->block_size = block_size;
  2054. return 0;
  2055. }
  2056. static void init_features(struct cache_features *cf)
  2057. {
  2058. cf->mode = CM_WRITE;
  2059. cf->io_mode = CM_IO_WRITEBACK;
  2060. cf->metadata_version = 1;
  2061. }
  2062. static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
  2063. char **error)
  2064. {
  2065. static struct dm_arg _args[] = {
  2066. {0, 2, "Invalid number of cache feature arguments"},
  2067. };
  2068. int r;
  2069. unsigned argc;
  2070. const char *arg;
  2071. struct cache_features *cf = &ca->features;
  2072. init_features(cf);
  2073. r = dm_read_arg_group(_args, as, &argc, error);
  2074. if (r)
  2075. return -EINVAL;
  2076. while (argc--) {
  2077. arg = dm_shift_arg(as);
  2078. if (!strcasecmp(arg, "writeback"))
  2079. cf->io_mode = CM_IO_WRITEBACK;
  2080. else if (!strcasecmp(arg, "writethrough"))
  2081. cf->io_mode = CM_IO_WRITETHROUGH;
  2082. else if (!strcasecmp(arg, "passthrough"))
  2083. cf->io_mode = CM_IO_PASSTHROUGH;
  2084. else if (!strcasecmp(arg, "metadata2"))
  2085. cf->metadata_version = 2;
  2086. else {
  2087. *error = "Unrecognised cache feature requested";
  2088. return -EINVAL;
  2089. }
  2090. }
  2091. return 0;
  2092. }
  2093. static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
  2094. char **error)
  2095. {
  2096. static struct dm_arg _args[] = {
  2097. {0, 1024, "Invalid number of policy arguments"},
  2098. };
  2099. int r;
  2100. if (!at_least_one_arg(as, error))
  2101. return -EINVAL;
  2102. ca->policy_name = dm_shift_arg(as);
  2103. r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
  2104. if (r)
  2105. return -EINVAL;
  2106. ca->policy_argv = (const char **)as->argv;
  2107. dm_consume_args(as, ca->policy_argc);
  2108. return 0;
  2109. }
  2110. static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
  2111. char **error)
  2112. {
  2113. int r;
  2114. struct dm_arg_set as;
  2115. as.argc = argc;
  2116. as.argv = argv;
  2117. r = parse_metadata_dev(ca, &as, error);
  2118. if (r)
  2119. return r;
  2120. r = parse_cache_dev(ca, &as, error);
  2121. if (r)
  2122. return r;
  2123. r = parse_origin_dev(ca, &as, error);
  2124. if (r)
  2125. return r;
  2126. r = parse_block_size(ca, &as, error);
  2127. if (r)
  2128. return r;
  2129. r = parse_features(ca, &as, error);
  2130. if (r)
  2131. return r;
  2132. r = parse_policy(ca, &as, error);
  2133. if (r)
  2134. return r;
  2135. return 0;
  2136. }
  2137. /*----------------------------------------------------------------*/
  2138. static struct kmem_cache *migration_cache;
  2139. #define NOT_CORE_OPTION 1
  2140. static int process_config_option(struct cache *cache, const char *key, const char *value)
  2141. {
  2142. unsigned long tmp;
  2143. if (!strcasecmp(key, "migration_threshold")) {
  2144. if (kstrtoul(value, 10, &tmp))
  2145. return -EINVAL;
  2146. cache->migration_threshold = tmp;
  2147. return 0;
  2148. }
  2149. return NOT_CORE_OPTION;
  2150. }
  2151. static int set_config_value(struct cache *cache, const char *key, const char *value)
  2152. {
  2153. int r = process_config_option(cache, key, value);
  2154. if (r == NOT_CORE_OPTION)
  2155. r = policy_set_config_value(cache->policy, key, value);
  2156. if (r)
  2157. DMWARN("bad config value for %s: %s", key, value);
  2158. return r;
  2159. }
  2160. static int set_config_values(struct cache *cache, int argc, const char **argv)
  2161. {
  2162. int r = 0;
  2163. if (argc & 1) {
  2164. DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
  2165. return -EINVAL;
  2166. }
  2167. while (argc) {
  2168. r = set_config_value(cache, argv[0], argv[1]);
  2169. if (r)
  2170. break;
  2171. argc -= 2;
  2172. argv += 2;
  2173. }
  2174. return r;
  2175. }
  2176. static int create_cache_policy(struct cache *cache, struct cache_args *ca,
  2177. char **error)
  2178. {
  2179. struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
  2180. cache->cache_size,
  2181. cache->origin_sectors,
  2182. cache->sectors_per_block);
  2183. if (IS_ERR(p)) {
  2184. *error = "Error creating cache's policy";
  2185. return PTR_ERR(p);
  2186. }
  2187. cache->policy = p;
  2188. return 0;
  2189. }
  2190. /*
  2191. * We want the discard block size to be at least the size of the cache
  2192. * block size and have no more than 2^14 discard blocks across the origin.
  2193. */
  2194. #define MAX_DISCARD_BLOCKS (1 << 14)
  2195. static bool too_many_discard_blocks(sector_t discard_block_size,
  2196. sector_t origin_size)
  2197. {
  2198. (void) sector_div(origin_size, discard_block_size);
  2199. return origin_size > MAX_DISCARD_BLOCKS;
  2200. }
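/*
 * e.g. with 512 sector cache blocks and a 2TiB (2^32 sector) origin the
 * discard block size doubles from 512 up to 262144 sectors (128MB), the
 * smallest power of two that gives no more than 2^14 discard blocks.
 */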
  2201. static sector_t calculate_discard_block_size(sector_t cache_block_size,
  2202. sector_t origin_size)
  2203. {
  2204. sector_t discard_block_size = cache_block_size;
  2205. if (origin_size)
  2206. while (too_many_discard_blocks(discard_block_size, origin_size))
  2207. discard_block_size *= 2;
  2208. return discard_block_size;
  2209. }
  2210. static void set_cache_size(struct cache *cache, dm_cblock_t size)
  2211. {
  2212. dm_block_t nr_blocks = from_cblock(size);
  2213. if (nr_blocks > (1 << 20) && cache->cache_size != size)
  2214. DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
  2215. "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
  2216. "Please consider increasing the cache block size to reduce the overall cache block count.",
  2217. (unsigned long long) nr_blocks);
  2218. cache->cache_size = size;
  2219. }
  2220. #define DEFAULT_MIGRATION_THRESHOLD 2048
  2221. static int cache_create(struct cache_args *ca, struct cache **result)
  2222. {
  2223. int r = 0;
  2224. char **error = &ca->ti->error;
  2225. struct cache *cache;
  2226. struct dm_target *ti = ca->ti;
  2227. dm_block_t origin_blocks;
  2228. struct dm_cache_metadata *cmd;
  2229. bool may_format = ca->features.mode == CM_WRITE;
  2230. cache = kzalloc(sizeof(*cache), GFP_KERNEL);
  2231. if (!cache)
  2232. return -ENOMEM;
  2233. cache->ti = ca->ti;
  2234. ti->private = cache;
  2235. ti->num_flush_bios = 2;
  2236. ti->flush_supported = true;
  2237. ti->num_discard_bios = 1;
  2238. ti->discards_supported = true;
  2239. ti->discard_zeroes_data_unsupported = true;
  2240. ti->split_discard_bios = false;
  2241. cache->features = ca->features;
  2242. ti->per_io_data_size = get_per_bio_data_size(cache);
  2243. cache->callbacks.congested_fn = cache_is_congested;
  2244. dm_table_add_target_callbacks(ti->table, &cache->callbacks);
  2245. cache->metadata_dev = ca->metadata_dev;
  2246. cache->origin_dev = ca->origin_dev;
  2247. cache->cache_dev = ca->cache_dev;
  2248. ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
  2249. /* FIXME: factor out this whole section */
  2250. origin_blocks = cache->origin_sectors = ca->origin_sectors;
  2251. origin_blocks = block_div(origin_blocks, ca->block_size);
  2252. cache->origin_blocks = to_oblock(origin_blocks);
  2253. cache->sectors_per_block = ca->block_size;
  2254. if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
  2255. r = -EINVAL;
  2256. goto bad;
  2257. }
  2258. if (ca->block_size & (ca->block_size - 1)) {
  2259. dm_block_t cache_size = ca->cache_sectors;
  2260. cache->sectors_per_block_shift = -1;
  2261. cache_size = block_div(cache_size, ca->block_size);
  2262. set_cache_size(cache, to_cblock(cache_size));
  2263. } else {
  2264. cache->sectors_per_block_shift = __ffs(ca->block_size);
  2265. set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
  2266. }
  2267. r = create_cache_policy(cache, ca, error);
  2268. if (r)
  2269. goto bad;
  2270. cache->policy_nr_args = ca->policy_argc;
  2271. cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
  2272. r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
  2273. if (r) {
  2274. *error = "Error setting cache policy's config values";
  2275. goto bad;
  2276. }
  2277. cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
  2278. ca->block_size, may_format,
  2279. dm_cache_policy_get_hint_size(cache->policy),
  2280. ca->features.metadata_version);
  2281. if (IS_ERR(cmd)) {
  2282. *error = "Error creating metadata object";
  2283. r = PTR_ERR(cmd);
  2284. goto bad;
  2285. }
  2286. cache->cmd = cmd;
  2287. set_cache_mode(cache, CM_WRITE);
  2288. if (get_cache_mode(cache) != CM_WRITE) {
  2289. *error = "Unable to get write access to metadata, please check/repair metadata.";
  2290. r = -EINVAL;
  2291. goto bad;
  2292. }
  2293. if (passthrough_mode(&cache->features)) {
  2294. bool all_clean;
  2295. r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
  2296. if (r) {
  2297. *error = "dm_cache_metadata_all_clean() failed";
  2298. goto bad;
  2299. }
  2300. if (!all_clean) {
  2301. *error = "Cannot enter passthrough mode unless all blocks are clean";
  2302. r = -EINVAL;
  2303. goto bad;
  2304. }
  2305. }
  2306. spin_lock_init(&cache->lock);
  2307. INIT_LIST_HEAD(&cache->deferred_cells);
  2308. bio_list_init(&cache->deferred_bios);
  2309. bio_list_init(&cache->deferred_flush_bios);
  2310. bio_list_init(&cache->deferred_writethrough_bios);
  2311. INIT_LIST_HEAD(&cache->quiesced_migrations);
  2312. INIT_LIST_HEAD(&cache->completed_migrations);
  2313. INIT_LIST_HEAD(&cache->need_commit_migrations);
  2314. atomic_set(&cache->nr_allocated_migrations, 0);
  2315. atomic_set(&cache->nr_io_migrations, 0);
  2316. init_waitqueue_head(&cache->migration_wait);
  2317. init_waitqueue_head(&cache->quiescing_wait);
  2318. atomic_set(&cache->quiescing, 0);
  2319. atomic_set(&cache->quiescing_ack, 0);
  2320. r = -ENOMEM;
  2321. atomic_set(&cache->nr_dirty, 0);
  2322. cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
  2323. if (!cache->dirty_bitset) {
  2324. *error = "could not allocate dirty bitset";
  2325. goto bad;
  2326. }
  2327. clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
  2328. cache->discard_block_size =
  2329. calculate_discard_block_size(cache->sectors_per_block,
  2330. cache->origin_sectors);
  2331. cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
  2332. cache->discard_block_size));
  2333. cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
  2334. if (!cache->discard_bitset) {
  2335. *error = "could not allocate discard bitset";
  2336. goto bad;
  2337. }
  2338. clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
  2339. cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
  2340. if (IS_ERR(cache->copier)) {
  2341. *error = "could not create kcopyd client";
  2342. r = PTR_ERR(cache->copier);
  2343. goto bad;
  2344. }
  2345. cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
  2346. if (!cache->wq) {
  2347. *error = "could not create workqueue for metadata object";
  2348. goto bad;
  2349. }
  2350. INIT_WORK(&cache->worker, do_worker);
  2351. INIT_DELAYED_WORK(&cache->waker, do_waker);
  2352. cache->last_commit_jiffies = jiffies;
  2353. cache->prison = dm_bio_prison_create();
  2354. if (!cache->prison) {
  2355. *error = "could not create bio prison";
  2356. goto bad;
  2357. }
  2358. cache->all_io_ds = dm_deferred_set_create();
  2359. if (!cache->all_io_ds) {
  2360. *error = "could not create all_io deferred set";
  2361. goto bad;
  2362. }
  2363. cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
  2364. migration_cache);
  2365. if (!cache->migration_pool) {
  2366. *error = "Error creating cache's migration mempool";
  2367. goto bad;
  2368. }
  2369. cache->need_tick_bio = true;
  2370. cache->sized = false;
  2371. cache->invalidate = false;
  2372. cache->commit_requested = false;
  2373. cache->loaded_mappings = false;
  2374. cache->loaded_discards = false;
  2375. load_stats(cache);
  2376. atomic_set(&cache->stats.demotion, 0);
  2377. atomic_set(&cache->stats.promotion, 0);
  2378. atomic_set(&cache->stats.copies_avoided, 0);
  2379. atomic_set(&cache->stats.cache_cell_clash, 0);
  2380. atomic_set(&cache->stats.commit_count, 0);
  2381. atomic_set(&cache->stats.discard_count, 0);
  2382. spin_lock_init(&cache->invalidation_lock);
  2383. INIT_LIST_HEAD(&cache->invalidation_requests);
  2384. iot_init(&cache->origin_tracker);
  2385. *result = cache;
  2386. return 0;
  2387. bad:
  2388. destroy(cache);
  2389. return r;
  2390. }
  2391. static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
  2392. {
  2393. unsigned i;
  2394. const char **copy;
  2395. copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
  2396. if (!copy)
  2397. return -ENOMEM;
  2398. for (i = 0; i < argc; i++) {
  2399. copy[i] = kstrdup(argv[i], GFP_KERNEL);
  2400. if (!copy[i]) {
  2401. while (i--)
  2402. kfree(copy[i]);
  2403. kfree(copy);
  2404. return -ENOMEM;
  2405. }
  2406. }
  2407. cache->nr_ctr_args = argc;
  2408. cache->ctr_args = copy;
  2409. return 0;
  2410. }
  2411. static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
  2412. {
  2413. int r = -EINVAL;
  2414. struct cache_args *ca;
  2415. struct cache *cache = NULL;
  2416. ca = kzalloc(sizeof(*ca), GFP_KERNEL);
  2417. if (!ca) {
  2418. ti->error = "Error allocating memory for cache";
  2419. return -ENOMEM;
  2420. }
  2421. ca->ti = ti;
  2422. r = parse_cache_args(ca, argc, argv, &ti->error);
  2423. if (r)
  2424. goto out;
  2425. r = cache_create(ca, &cache);
  2426. if (r)
  2427. goto out;
  2428. r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
  2429. if (r) {
  2430. destroy(cache);
  2431. goto out;
  2432. }
  2433. ti->private = cache;
  2434. out:
  2435. destroy_cache_args(ca);
  2436. return r;
  2437. }
  2438. /*----------------------------------------------------------------*/
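/*
 * cache_map() is the fast path, run in the context of the submitting
 * thread.  It never starts a migration (can_migrate is false): hits and
 * misses that only need remapping are handled inline; anything that needs
 * a migration is deferred to the worker, and a bio hitting a block that is
 * already detained simply joins that cell.
 */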
  2439. static int cache_map(struct dm_target *ti, struct bio *bio)
  2440. {
  2441. struct cache *cache = ti->private;
  2442. int r;
  2443. struct dm_bio_prison_cell *cell = NULL;
  2444. dm_oblock_t block = get_bio_block(cache, bio);
  2445. size_t pb_data_size = get_per_bio_data_size(cache);
  2446. bool can_migrate = false;
  2447. bool fast_promotion;
  2448. struct policy_result lookup_result;
  2449. struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
  2450. struct old_oblock_lock ool;
  2451. ool.locker.fn = null_locker;
  2452. if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
  2453. /*
  2454. * This can only occur if the io goes to a partial block at
  2455. * the end of the origin device. We don't cache these.
  2456. * Just remap to the origin and carry on.
  2457. */
  2458. remap_to_origin(cache, bio);
  2459. accounted_begin(cache, bio);
  2460. return DM_MAPIO_REMAPPED;
  2461. }
  2462. if (discard_or_flush(bio)) {
  2463. defer_bio(cache, bio);
  2464. return DM_MAPIO_SUBMITTED;
  2465. }
  2466. /*
  2467. * Check to see if that block is currently migrating.
  2468. */
  2469. cell = alloc_prison_cell(cache);
  2470. if (!cell) {
  2471. defer_bio(cache, bio);
  2472. return DM_MAPIO_SUBMITTED;
  2473. }
  2474. r = bio_detain(cache, block, bio, cell,
  2475. (cell_free_fn) free_prison_cell,
  2476. cache, &cell);
  2477. if (r) {
  2478. if (r < 0)
  2479. defer_bio(cache, bio);
  2480. return DM_MAPIO_SUBMITTED;
  2481. }
  2482. fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);
  2483. r = policy_map(cache->policy, block, false, can_migrate, fast_promotion,
  2484. bio, &ool.locker, &lookup_result);
  2485. if (r == -EWOULDBLOCK) {
  2486. cell_defer(cache, cell, true);
  2487. return DM_MAPIO_SUBMITTED;
  2488. } else if (r) {
  2489. DMERR_LIMIT("%s: Unexpected return from cache replacement policy: %d",
  2490. cache_device_name(cache), r);
  2491. cell_defer(cache, cell, false);
  2492. bio_io_error(bio);
  2493. return DM_MAPIO_SUBMITTED;
  2494. }
  2495. r = DM_MAPIO_REMAPPED;
  2496. switch (lookup_result.op) {
  2497. case POLICY_HIT:
  2498. if (passthrough_mode(&cache->features)) {
  2499. if (bio_data_dir(bio) == WRITE) {
  2500. /*
  2501. * We need to invalidate this block, so
  2502. * defer for the worker thread.
  2503. */
  2504. cell_defer(cache, cell, true);
  2505. r = DM_MAPIO_SUBMITTED;
  2506. } else {
  2507. inc_miss_counter(cache, bio);
  2508. remap_to_origin_clear_discard(cache, bio, block);
  2509. accounted_begin(cache, bio);
  2510. inc_ds(cache, bio, cell);
  2511. // FIXME: we want to remap hits or misses straight
  2512. // away rather than passing over to the worker.
  2513. cell_defer(cache, cell, false);
  2514. }
  2515. } else {
  2516. inc_hit_counter(cache, bio);
  2517. if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
  2518. !is_dirty(cache, lookup_result.cblock)) {
  2519. remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
  2520. accounted_begin(cache, bio);
  2521. inc_ds(cache, bio, cell);
  2522. cell_defer(cache, cell, false);
  2523. } else
  2524. remap_cell_to_cache_dirty(cache, cell, block, lookup_result.cblock, false);
  2525. }
  2526. break;
  2527. case POLICY_MISS:
  2528. inc_miss_counter(cache, bio);
  2529. if (pb->req_nr != 0) {
  2530. /*
  2531. * This is a duplicate writethrough io that is no
  2532. * longer needed because the block has been demoted.
  2533. */
  2534. bio_endio(bio);
  2535. // FIXME: remap everything as a miss
  2536. cell_defer(cache, cell, false);
  2537. r = DM_MAPIO_SUBMITTED;
  2538. } else
  2539. remap_cell_to_origin_clear_discard(cache, cell, block, false);
  2540. break;
  2541. default:
  2542. DMERR_LIMIT("%s: %s: erroring bio: unknown policy op: %u",
  2543. cache_device_name(cache), __func__,
  2544. (unsigned) lookup_result.op);
  2545. cell_defer(cache, cell, false);
  2546. bio_io_error(bio);
  2547. r = DM_MAPIO_SUBMITTED;
  2548. }
  2549. return r;
  2550. }
  2551. static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
  2552. {
  2553. struct cache *cache = ti->private;
  2554. unsigned long flags;
  2555. size_t pb_data_size = get_per_bio_data_size(cache);
  2556. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  2557. if (pb->tick) {
  2558. policy_tick(cache->policy, false);
  2559. spin_lock_irqsave(&cache->lock, flags);
  2560. cache->need_tick_bio = true;
  2561. spin_unlock_irqrestore(&cache->lock, flags);
  2562. }
  2563. check_for_quiesced_migrations(cache, pb);
  2564. accounted_complete(cache, bio);
  2565. return 0;
  2566. }
  2567. static int write_dirty_bitset(struct cache *cache)
  2568. {
  2569. int r;
  2570. if (get_cache_mode(cache) >= CM_READ_ONLY)
  2571. return -EINVAL;
  2572. r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset);
  2573. if (r)
  2574. metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r);
  2575. return r;
  2576. }
  2577. static int write_discard_bitset(struct cache *cache)
  2578. {
  2579. unsigned i, r;
  2580. if (get_cache_mode(cache) >= CM_READ_ONLY)
  2581. return -EINVAL;
  2582. r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
  2583. cache->discard_nr_blocks);
  2584. if (r) {
  2585. DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache));
  2586. metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r);
  2587. return r;
  2588. }
  2589. for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
  2590. r = dm_cache_set_discard(cache->cmd, to_dblock(i),
  2591. is_discarded(cache, to_dblock(i)));
  2592. if (r) {
  2593. metadata_operation_failed(cache, "dm_cache_set_discard", r);
  2594. return r;
  2595. }
  2596. }
  2597. return 0;
  2598. }
  2599. static int write_hints(struct cache *cache)
  2600. {
  2601. int r;
  2602. if (get_cache_mode(cache) >= CM_READ_ONLY)
  2603. return -EINVAL;
  2604. r = dm_cache_write_hints(cache->cmd, cache->policy);
  2605. if (r) {
  2606. metadata_operation_failed(cache, "dm_cache_write_hints", r);
  2607. return r;
  2608. }
  2609. return 0;
  2610. }
  2611. /*
  2612. * returns true on success
  2613. */
  2614. static bool sync_metadata(struct cache *cache)
  2615. {
  2616. int r1, r2, r3, r4;
  2617. r1 = write_dirty_bitset(cache);
  2618. if (r1)
  2619. DMERR("%s: could not write dirty bitset", cache_device_name(cache));
  2620. r2 = write_discard_bitset(cache);
  2621. if (r2)
  2622. DMERR("%s: could not write discard bitset", cache_device_name(cache));
  2623. save_stats(cache);
  2624. r3 = write_hints(cache);
  2625. if (r3)
  2626. DMERR("%s: could not write hints", cache_device_name(cache));
  2627. /*
  2628. * If writing the above metadata failed, we still commit, but don't
  2629. * set the clean shutdown flag. This will effectively force every
  2630. * dirty bit to be set on reload.
  2631. */
  2632. r4 = commit(cache, !r1 && !r2 && !r3);
  2633. if (r4)
  2634. DMERR("%s: could not write cache metadata", cache_device_name(cache));
  2635. return !r1 && !r2 && !r3 && !r4;
  2636. }
  2637. static void cache_postsuspend(struct dm_target *ti)
  2638. {
  2639. struct cache *cache = ti->private;
  2640. start_quiescing(cache);
  2641. wait_for_migrations(cache);
  2642. stop_worker(cache);
  2643. requeue_deferred_bios(cache);
  2644. requeue_deferred_cells(cache);
  2645. stop_quiescing(cache);
  2646. if (get_cache_mode(cache) == CM_WRITE)
  2647. (void) sync_metadata(cache);
  2648. }
  2649. static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
  2650. bool dirty, uint32_t hint, bool hint_valid)
  2651. {
  2652. int r;
  2653. struct cache *cache = context;
  2654. r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
  2655. if (r)
  2656. return r;
  2657. if (dirty)
  2658. set_dirty(cache, oblock, cblock);
  2659. else
  2660. clear_dirty(cache, oblock, cblock);
  2661. return 0;
  2662. }
  2663. /*
  2664. * The discard block size in the on disk metadata is not
2665. * necessarily the same as we're currently using. So we have to
  2666. * be careful to only set the discarded attribute if we know it
  2667. * covers a complete block of the new size.
  2668. */
  2669. struct discard_load_info {
  2670. struct cache *cache;
  2671. /*
  2672. * These blocks are sized using the on disk dblock size, rather
  2673. * than the current one.
  2674. */
  2675. dm_block_t block_size;
  2676. dm_block_t discard_begin, discard_end;
  2677. };
  2678. static void discard_load_info_init(struct cache *cache,
  2679. struct discard_load_info *li)
  2680. {
  2681. li->cache = cache;
  2682. li->discard_begin = li->discard_end = 0;
  2683. }
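/*
 * Example: with an on-disk dblock size of 64 sectors and a current size of
 * 128 sectors, a loaded range of dblocks [10, 14) covers sectors 640..895,
 * which maps to current dblocks 5 and 6.
 */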
static void set_discard_range(struct discard_load_info *li)
{
	sector_t b, e;

	if (li->discard_begin == li->discard_end)
		return;

	/*
	 * Convert to sectors.
	 */
	b = li->discard_begin * li->block_size;
	e = li->discard_end * li->block_size;

	/*
	 * Then convert back to the current dblock size.
	 */
	b = dm_sector_div_up(b, li->cache->discard_block_size);
	sector_div(e, li->cache->discard_block_size);

	/*
	 * The origin may have shrunk, so we need to check we're still in
	 * bounds.
	 */
	if (e > from_dblock(li->cache->discard_nr_blocks))
		e = from_dblock(li->cache->discard_nr_blocks);

	for (; b < e; b++)
		set_discard(li->cache, to_dblock(b));
}

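/*
 * Illustrative example of the conversion above (values are invented):
 * with an on-disk dblock size of 128 sectors and a current
 * discard_block_size of 512 sectors, a loaded range of dblocks [6, 16)
 * spans sectors [768, 2048).  Rounding the start up and the end down to
 * whole current-size blocks yields dblocks [2, 4), so only blocks 2 and
 * 3 are marked discarded; the partially covered block at the front is
 * conservatively left undiscarded.
 */
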
static int load_discard(void *context, sector_t discard_block_size,
			dm_dblock_t dblock, bool discard)
{
	struct discard_load_info *li = context;

	li->block_size = discard_block_size;

	if (discard) {
		if (from_dblock(dblock) == li->discard_end)
			/*
			 * We're already in a discard range, just extend it.
			 */
			li->discard_end = li->discard_end + 1ULL;
		else {
			/*
			 * Emit the old range and start a new one.
			 */
			set_discard_range(li);
			li->discard_begin = from_dblock(dblock);
			li->discard_end = li->discard_begin + 1ULL;
		}
	} else {
		set_discard_range(li);
		li->discard_begin = li->discard_end = 0;
	}

	return 0;
}

static dm_cblock_t get_cache_dev_size(struct cache *cache)
{
	sector_t size = get_dev_size(cache->cache_dev);
	(void) sector_div(size, cache->sectors_per_block);
	return to_cblock(size);
}

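/*
 * Hypothetical example for can_resize() below (numbers are invented):
 * shrinking from 16384 to 8192 cblocks is only permitted if none of
 * cblocks 8192..16383 is dirty, since a dirty block cannot be dropped
 * without losing data; growing the cache is always allowed.
 */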
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
	if (from_cblock(new_size) > from_cblock(cache->cache_size))
		return true;

	/*
	 * We can't drop a dirty block when shrinking the cache.
	 * Check each block that would be dropped before advancing, so the
	 * first dropped block is covered and we never index past the end
	 * of the dirty bitset.
	 */
	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
		if (is_dirty(cache, new_size)) {
			DMERR("%s: unable to shrink cache; cache block %llu is dirty",
			      cache_device_name(cache),
			      (unsigned long long) from_cblock(new_size));
			return false;
		}
		new_size = to_cblock(from_cblock(new_size) + 1);
	}

	return true;
}

static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{
	int r;

	r = dm_cache_resize(cache->cmd, new_size);
	if (r) {
		DMERR("%s: could not resize cache metadata", cache_device_name(cache));
		metadata_operation_failed(cache, "dm_cache_resize", r);
		return r;
	}

	set_cache_size(cache, new_size);

	return 0;
}

static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	dm_cblock_t csize = get_cache_dev_size(cache);

	/*
	 * Check to see if the cache has resized.
	 */
	if (!cache->sized) {
		r = resize_cache_dev(cache, csize);
		if (r)
			return r;

		cache->sized = true;

	} else if (csize != cache->cache_size) {
		if (!can_resize(cache, csize))
			return -EINVAL;

		r = resize_cache_dev(cache, csize);
		if (r)
			return r;
	}

	if (!cache->loaded_mappings) {
		r = dm_cache_load_mappings(cache->cmd, cache->policy,
					   load_mapping, cache);
		if (r) {
			DMERR("%s: could not load cache mappings", cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_load_mappings", r);
			return r;
		}

		cache->loaded_mappings = true;
	}

	if (!cache->loaded_discards) {
		struct discard_load_info li;

		/*
		 * The discard bitset could have been resized, or the
		 * discard block size changed.  To be safe we start by
		 * setting every dblock to not discarded.
		 */
		clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

		discard_load_info_init(cache, &li);
		r = dm_cache_load_discards(cache->cmd, load_discard, &li);
		if (r) {
			DMERR("%s: could not load origin discards", cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_load_discards", r);
			return r;
		}
		set_discard_range(&li);

		cache->loaded_discards = true;
	}

	return r;
}

static void cache_resume(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	cache->need_tick_bio = true;
	do_waker(&cache->waker.work);
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <cache block size> <#used cache blocks>/<#total cache blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
 */
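/*
 * Example STATUSTYPE_INFO output (illustrative only; every number below
 * is invented, and the real output is a single line, wrapped here):
 *
 *   8 72/65536 128 4323/131072 512 3474 972 4084 0 2779 0
 *   1 writeback 2 migration_threshold 2048 smq 0 rw -
 *
 * i.e. 8-sector metadata blocks with 72 of 65536 used, 128-sector cache
 * blocks with 4323 of 131072 resident, the read/write hit and miss
 * counters, demotions, promotions and dirty count, then the feature
 * list, the core arguments, the policy name and arguments, the metadata
 * mode and the needs_check flag.
 */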
static void cache_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	int r = 0;
	unsigned i;
	ssize_t sz = 0;
	dm_block_t nr_free_blocks_metadata = 0;
	dm_block_t nr_blocks_metadata = 0;
	char buf[BDEVNAME_SIZE];
	struct cache *cache = ti->private;
	dm_cblock_t residency;
	bool needs_check;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_cache_mode(cache) == CM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit(cache, false);

		r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
		if (r) {
			DMERR("%s: dm_cache_get_free_metadata_block_count returned %d",
			      cache_device_name(cache), r);
			goto err;
		}

		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
		if (r) {
			DMERR("%s: dm_cache_get_metadata_dev_size returned %d",
			      cache_device_name(cache), r);
			goto err;
		}

		residency = policy_residency(cache->policy);

		DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
		       (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned long long)cache->sectors_per_block,
		       (unsigned long long) from_cblock(residency),
		       (unsigned long long) from_cblock(cache->cache_size),
		       (unsigned) atomic_read(&cache->stats.read_hit),
		       (unsigned) atomic_read(&cache->stats.read_miss),
		       (unsigned) atomic_read(&cache->stats.write_hit),
		       (unsigned) atomic_read(&cache->stats.write_miss),
		       (unsigned) atomic_read(&cache->stats.demotion),
		       (unsigned) atomic_read(&cache->stats.promotion),
		       (unsigned long) atomic_read(&cache->nr_dirty));

		if (cache->features.metadata_version == 2)
			DMEMIT("2 metadata2 ");
		else
			DMEMIT("1 ");

		if (writethrough_mode(&cache->features))
			DMEMIT("writethrough ");
		else if (passthrough_mode(&cache->features))
			DMEMIT("passthrough ");
		else if (writeback_mode(&cache->features))
			DMEMIT("writeback ");
		else {
			DMERR("%s: internal error: unknown io mode: %d",
			      cache_device_name(cache), (int) cache->features.io_mode);
			goto err;
		}

		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);

		DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
		if (sz < maxlen) {
			r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
			if (r)
				DMERR("%s: policy_emit_config_values returned %d",
				      cache_device_name(cache), r);
		}

		if (get_cache_mode(cache) == CM_READ_ONLY)
			DMEMIT("ro ");
		else
			DMEMIT("rw ");

		r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);

		if (r || needs_check)
			DMEMIT("needs_check ");
		else
			DMEMIT("- ");

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < cache->nr_ctr_args - 1; i++)
			DMEMIT(" %s", cache->ctr_args[i]);
		if (cache->nr_ctr_args)
			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
	}

	return;

err:
	DMEMIT("Error");
}

/*
 * A cache block range can take two forms:
 *
 * i) A single cblock, eg. '3456'
 * ii) A begin and end cblock with a dash between, eg. 123-234
 */
static int parse_cblock_range(struct cache *cache, const char *str,
			      struct cblock_range *result)
{
	char dummy;
	uint64_t b, e;
	int r;

	/*
	 * Try and parse form (ii) first.
	 */
	r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
	if (r < 0)
		return r;

	if (r == 2) {
		result->begin = to_cblock(b);
		result->end = to_cblock(e);
		return 0;
	}

	/*
	 * That didn't work, try form (i).
	 */
	r = sscanf(str, "%llu%c", &b, &dummy);
	if (r < 0)
		return r;

	if (r == 1) {
		result->begin = to_cblock(b);
		result->end = to_cblock(from_cblock(result->begin) + 1u);
		return 0;
	}

	DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
	return -EINVAL;
}

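/*
 * Note: cblock ranges are stored end-exclusive.  The single-cblock form
 * above becomes begin = b, end = b + 1, so 'end' is exclusive; a range
 * such as 123-234 therefore covers cblocks 123 through 233.
 */
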
static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
{
	uint64_t b = from_cblock(range->begin);
	uint64_t e = from_cblock(range->end);
	uint64_t n = from_cblock(cache->cache_size);

	if (b >= n) {
		DMERR("%s: begin cblock out of range: %llu >= %llu",
		      cache_device_name(cache), b, n);
		return -EINVAL;
	}

	if (e > n) {
		DMERR("%s: end cblock out of range: %llu > %llu",
		      cache_device_name(cache), e, n);
		return -EINVAL;
	}

	if (b >= e) {
		DMERR("%s: invalid cblock range: %llu >= %llu",
		      cache_device_name(cache), b, e);
		return -EINVAL;
	}

	return 0;
}

static int request_invalidation(struct cache *cache, struct cblock_range *range)
{
	struct invalidation_request req;

	INIT_LIST_HEAD(&req.list);
	req.cblocks = range;
	atomic_set(&req.complete, 0);
	req.err = 0;
	init_waitqueue_head(&req.result_wait);

	spin_lock(&cache->invalidation_lock);
	list_add(&req.list, &cache->invalidation_requests);
	spin_unlock(&cache->invalidation_lock);
	wake_worker(cache);

	wait_event(req.result_wait, atomic_read(&req.complete));
	return req.err;
}

static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
					      const char **cblock_ranges)
{
	int r = 0;
	unsigned i;
	struct cblock_range range;

	if (!passthrough_mode(&cache->features)) {
		DMERR("%s: cache has to be in passthrough mode for invalidation",
		      cache_device_name(cache));
		return -EPERM;
	}

	for (i = 0; i < count; i++) {
		r = parse_cblock_range(cache, cblock_ranges[i], &range);
		if (r)
			break;

		r = validate_cblock_range(cache, &range);
		if (r)
			break;

		/*
		 * Pass begin and end origin blocks to the worker and wake it.
		 */
		r = request_invalidation(cache, &range);
		if (r)
			break;
	}

	return r;
}

/*
 * Supports
 *	"<key> <value>"
 * and
 *	"invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
 *
 * The key migration_threshold is supported by the cache target core.
 */
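/*
 * Example invocations (illustrative; assumes a mapped device named
 * 'my_cache', and passthrough mode for the invalidation message):
 *
 *   dmsetup message my_cache 0 migration_threshold 16384
 *   dmsetup message my_cache 0 invalidate_cblocks 2345 3300-3330
 */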
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct cache *cache = ti->private;

	if (!argc)
		return -EINVAL;

	if (get_cache_mode(cache) >= CM_READ_ONLY) {
		DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
		      cache_device_name(cache));
		return -EOPNOTSUPP;
	}

	if (!strcasecmp(argv[0], "invalidate_cblocks"))
		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);

	if (argc != 2)
		return -EINVAL;

	return set_config_value(cache, argv[0], argv[1]);
}

static int cache_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int r = 0;
	struct cache *cache = ti->private;

	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
	if (!r)
		r = fn(ti, cache->origin_dev, 0, ti->len, data);

	return r;
}

static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
	/*
	 * FIXME: these limits may be incompatible with the cache device
	 */
	limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
					    cache->origin_sectors);
	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
}

static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct cache *cache = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with the
	 * cache's blocksize (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < cache->sectors_per_block ||
	    do_div(io_opt_sectors, cache->sectors_per_block)) {
		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
	}
	set_discard_limits(cache, limits);
}

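/*
 * Worked example of the io_opt check above (values are invented): with
 * sectors_per_block = 128, a stacked io_opt of 512 sectors divides
 * evenly (do_div() leaves no remainder), so the stacked hints are kept;
 * an io_opt of 192 sectors leaves a remainder, so io_min and io_opt are
 * both overridden to the cache block size (128 sectors, i.e. 64KiB).
 */
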
/*----------------------------------------------------------------*/

static struct target_type cache_target = {
	.name = "cache",
	.version = {1, 10, 0},
	.module = THIS_MODULE,
	.ctr = cache_ctr,
	.dtr = cache_dtr,
	.map = cache_map,
	.end_io = cache_end_io,
	.postsuspend = cache_postsuspend,
	.preresume = cache_preresume,
	.resume = cache_resume,
	.status = cache_status,
	.message = cache_message,
	.iterate_devices = cache_iterate_devices,
	.io_hints = cache_io_hints,
};

static int __init dm_cache_init(void)
{
	int r;

	r = dm_register_target(&cache_target);
	if (r) {
		DMERR("cache target registration failed: %d", r);
		return r;
	}

	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
	if (!migration_cache) {
		dm_unregister_target(&cache_target);
		return -ENOMEM;
	}

	return 0;
}

static void __exit dm_cache_exit(void)
{
	dm_unregister_target(&cache_target);
	kmem_cache_destroy(migration_cache);
}

module_init(dm_cache_init);
module_exit(dm_cache_exit);

MODULE_DESCRIPTION(DM_NAME " cache target");
MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
MODULE_LICENSE("GPL");