dm-cache-target.c 94 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383
23833383438353836383738383839384038413842384338443845384638473848384938503851385238533854
  1. /*
  2. * Copyright (C) 2012 Red Hat. All rights reserved.
  3. *
  4. * This file is released under the GPL.
  5. */
  6. #include "dm.h"
  7. #include "dm-bio-prison.h"
  8. #include "dm-bio-record.h"
  9. #include "dm-cache-metadata.h"
  10. #include <linux/dm-io.h>
  11. #include <linux/dm-kcopyd.h>
  12. #include <linux/jiffies.h>
  13. #include <linux/init.h>
  14. #include <linux/mempool.h>
  15. #include <linux/module.h>
  16. #include <linux/slab.h>
  17. #include <linux/vmalloc.h>
  18. #define DM_MSG_PREFIX "cache"
  19. DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
  20. "A percentage of time allocated for copying to and/or from cache");
  21. /*----------------------------------------------------------------*/
  22. #define IOT_RESOLUTION 4
  23. struct io_tracker {
  24. spinlock_t lock;
  25. /*
  26. * Sectors of in-flight IO.
  27. */
  28. sector_t in_flight;
  29. /*
  30. * The time, in jiffies, when this device became idle (if it is
  31. * indeed idle).
  32. */
  33. unsigned long idle_time;
  34. unsigned long last_update_time;
  35. };
  36. static void iot_init(struct io_tracker *iot)
  37. {
  38. spin_lock_init(&iot->lock);
  39. iot->in_flight = 0ul;
  40. iot->idle_time = 0ul;
  41. iot->last_update_time = jiffies;
  42. }
  43. static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs)
  44. {
  45. if (iot->in_flight)
  46. return false;
  47. return time_after(jiffies, iot->idle_time + jifs);
  48. }
  49. static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
  50. {
  51. bool r;
  52. unsigned long flags;
  53. spin_lock_irqsave(&iot->lock, flags);
  54. r = __iot_idle_for(iot, jifs);
  55. spin_unlock_irqrestore(&iot->lock, flags);
  56. return r;
  57. }
  58. static void iot_io_begin(struct io_tracker *iot, sector_t len)
  59. {
  60. unsigned long flags;
  61. spin_lock_irqsave(&iot->lock, flags);
  62. iot->in_flight += len;
  63. spin_unlock_irqrestore(&iot->lock, flags);
  64. }
  65. static void __iot_io_end(struct io_tracker *iot, sector_t len)
  66. {
  67. iot->in_flight -= len;
  68. if (!iot->in_flight)
  69. iot->idle_time = jiffies;
  70. }
  71. static void iot_io_end(struct io_tracker *iot, sector_t len)
  72. {
  73. unsigned long flags;
  74. spin_lock_irqsave(&iot->lock, flags);
  75. __iot_io_end(iot, len);
  76. spin_unlock_irqrestore(&iot->lock, flags);
  77. }
  78. /*----------------------------------------------------------------*/
  79. /*
  80. * Glossary:
  81. *
  82. * oblock: index of an origin block
  83. * cblock: index of a cache block
  84. * promotion: movement of a block from origin to cache
  85. * demotion: movement of a block from cache to origin
  86. * migration: movement of a block between the origin and cache device,
  87. * either direction
  88. */
  89. /*----------------------------------------------------------------*/
  90. /*
  91. * There are a couple of places where we let a bio run, but want to do some
  92. * work before calling its endio function. We do this by temporarily
  93. * changing the endio fn.
  94. */
  95. struct dm_hook_info {
  96. bio_end_io_t *bi_end_io;
  97. };
  98. static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
  99. bio_end_io_t *bi_end_io, void *bi_private)
  100. {
  101. h->bi_end_io = bio->bi_end_io;
  102. bio->bi_end_io = bi_end_io;
  103. bio->bi_private = bi_private;
  104. }
  105. static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
  106. {
  107. bio->bi_end_io = h->bi_end_io;
  108. }
  109. /*----------------------------------------------------------------*/
  110. #define MIGRATION_POOL_SIZE 128
  111. #define COMMIT_PERIOD HZ
  112. #define MIGRATION_COUNT_WINDOW 10
  113. /*
  114. * The block size of the device holding cache data must be
  115. * between 32KB and 1GB.
  116. */
  117. #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
  118. #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
  119. enum cache_metadata_mode {
  120. CM_WRITE, /* metadata may be changed */
  121. CM_READ_ONLY, /* metadata may not be changed */
  122. CM_FAIL
  123. };
  124. enum cache_io_mode {
  125. /*
  126. * Data is written to cached blocks only. These blocks are marked
  127. * dirty. If you lose the cache device you will lose data.
  128. * Potential performance increase for both reads and writes.
  129. */
  130. CM_IO_WRITEBACK,
  131. /*
  132. * Data is written to both cache and origin. Blocks are never
  133. * dirty. Potential performance benfit for reads only.
  134. */
  135. CM_IO_WRITETHROUGH,
  136. /*
  137. * A degraded mode useful for various cache coherency situations
  138. * (eg, rolling back snapshots). Reads and writes always go to the
  139. * origin. If a write goes to a cached oblock, then the cache
  140. * block is invalidated.
  141. */
  142. CM_IO_PASSTHROUGH
  143. };
  144. struct cache_features {
  145. enum cache_metadata_mode mode;
  146. enum cache_io_mode io_mode;
  147. };
  148. struct cache_stats {
  149. atomic_t read_hit;
  150. atomic_t read_miss;
  151. atomic_t write_hit;
  152. atomic_t write_miss;
  153. atomic_t demotion;
  154. atomic_t promotion;
  155. atomic_t copies_avoided;
  156. atomic_t cache_cell_clash;
  157. atomic_t commit_count;
  158. atomic_t discard_count;
  159. };
  160. /*
  161. * Defines a range of cblocks, begin to (end - 1) are in the range. end is
  162. * the one-past-the-end value.
  163. */
  164. struct cblock_range {
  165. dm_cblock_t begin;
  166. dm_cblock_t end;
  167. };
  168. struct invalidation_request {
  169. struct list_head list;
  170. struct cblock_range *cblocks;
  171. atomic_t complete;
  172. int err;
  173. wait_queue_head_t result_wait;
  174. };
  175. struct cache {
  176. struct dm_target *ti;
  177. struct dm_target_callbacks callbacks;
  178. struct dm_cache_metadata *cmd;
  179. /*
  180. * Metadata is written to this device.
  181. */
  182. struct dm_dev *metadata_dev;
  183. /*
  184. * The slower of the two data devices. Typically a spindle.
  185. */
  186. struct dm_dev *origin_dev;
  187. /*
  188. * The faster of the two data devices. Typically an SSD.
  189. */
  190. struct dm_dev *cache_dev;
  191. /*
  192. * Size of the origin device in _complete_ blocks and native sectors.
  193. */
  194. dm_oblock_t origin_blocks;
  195. sector_t origin_sectors;
  196. /*
  197. * Size of the cache device in blocks.
  198. */
  199. dm_cblock_t cache_size;
  200. /*
  201. * Fields for converting from sectors to blocks.
  202. */
  203. uint32_t sectors_per_block;
  204. int sectors_per_block_shift;
  205. spinlock_t lock;
  206. struct list_head deferred_cells;
  207. struct bio_list deferred_bios;
  208. struct bio_list deferred_flush_bios;
  209. struct bio_list deferred_writethrough_bios;
  210. struct list_head quiesced_migrations;
  211. struct list_head completed_migrations;
  212. struct list_head need_commit_migrations;
  213. sector_t migration_threshold;
  214. wait_queue_head_t migration_wait;
  215. atomic_t nr_allocated_migrations;
  216. /*
  217. * The number of in flight migrations that are performing
  218. * background io. eg, promotion, writeback.
  219. */
  220. atomic_t nr_io_migrations;
  221. wait_queue_head_t quiescing_wait;
  222. atomic_t quiescing;
  223. atomic_t quiescing_ack;
  224. /*
  225. * cache_size entries, dirty if set
  226. */
  227. atomic_t nr_dirty;
  228. unsigned long *dirty_bitset;
  229. /*
  230. * origin_blocks entries, discarded if set.
  231. */
  232. dm_dblock_t discard_nr_blocks;
  233. unsigned long *discard_bitset;
  234. uint32_t discard_block_size; /* a power of 2 times sectors per block */
  235. /*
  236. * Rather than reconstructing the table line for the status we just
  237. * save it and regurgitate.
  238. */
  239. unsigned nr_ctr_args;
  240. const char **ctr_args;
  241. struct dm_kcopyd_client *copier;
  242. struct workqueue_struct *wq;
  243. struct work_struct worker;
  244. struct delayed_work waker;
  245. unsigned long last_commit_jiffies;
  246. struct dm_bio_prison *prison;
  247. struct dm_deferred_set *all_io_ds;
  248. mempool_t *migration_pool;
  249. struct dm_cache_policy *policy;
  250. unsigned policy_nr_args;
  251. bool need_tick_bio:1;
  252. bool sized:1;
  253. bool invalidate:1;
  254. bool commit_requested:1;
  255. bool loaded_mappings:1;
  256. bool loaded_discards:1;
  257. /*
  258. * Cache features such as write-through.
  259. */
  260. struct cache_features features;
  261. struct cache_stats stats;
  262. /*
  263. * Invalidation fields.
  264. */
  265. spinlock_t invalidation_lock;
  266. struct list_head invalidation_requests;
  267. struct io_tracker origin_tracker;
  268. };
  269. struct per_bio_data {
  270. bool tick:1;
  271. unsigned req_nr:2;
  272. struct dm_deferred_entry *all_io_entry;
  273. struct dm_hook_info hook_info;
  274. sector_t len;
  275. /*
  276. * writethrough fields. These MUST remain at the end of this
  277. * structure and the 'cache' member must be the first as it
  278. * is used to determine the offset of the writethrough fields.
  279. */
  280. struct cache *cache;
  281. dm_cblock_t cblock;
  282. struct dm_bio_details bio_details;
  283. };
  284. struct dm_cache_migration {
  285. struct list_head list;
  286. struct cache *cache;
  287. unsigned long start_jiffies;
  288. dm_oblock_t old_oblock;
  289. dm_oblock_t new_oblock;
  290. dm_cblock_t cblock;
  291. bool err:1;
  292. bool discard:1;
  293. bool writeback:1;
  294. bool demote:1;
  295. bool promote:1;
  296. bool requeue_holder:1;
  297. bool invalidate:1;
  298. struct dm_bio_prison_cell *old_ocell;
  299. struct dm_bio_prison_cell *new_ocell;
  300. };
  301. /*
  302. * Processing a bio in the worker thread may require these memory
  303. * allocations. We prealloc to avoid deadlocks (the same worker thread
  304. * frees them back to the mempool).
  305. */
  306. struct prealloc {
  307. struct dm_cache_migration *mg;
  308. struct dm_bio_prison_cell *cell1;
  309. struct dm_bio_prison_cell *cell2;
  310. };
  311. static enum cache_metadata_mode get_cache_mode(struct cache *cache);
  312. static void wake_worker(struct cache *cache)
  313. {
  314. queue_work(cache->wq, &cache->worker);
  315. }
  316. /*----------------------------------------------------------------*/
  317. static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
  318. {
  319. /* FIXME: change to use a local slab. */
  320. return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
  321. }
  322. static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
  323. {
  324. dm_bio_prison_free_cell(cache->prison, cell);
  325. }
  326. static struct dm_cache_migration *alloc_migration(struct cache *cache)
  327. {
  328. struct dm_cache_migration *mg;
  329. mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
  330. if (mg) {
  331. mg->cache = cache;
  332. atomic_inc(&mg->cache->nr_allocated_migrations);
  333. }
  334. return mg;
  335. }
  336. static void free_migration(struct dm_cache_migration *mg)
  337. {
  338. struct cache *cache = mg->cache;
  339. if (atomic_dec_and_test(&cache->nr_allocated_migrations))
  340. wake_up(&cache->migration_wait);
  341. mempool_free(mg, cache->migration_pool);
  342. }
  343. static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
  344. {
  345. if (!p->mg) {
  346. p->mg = alloc_migration(cache);
  347. if (!p->mg)
  348. return -ENOMEM;
  349. }
  350. if (!p->cell1) {
  351. p->cell1 = alloc_prison_cell(cache);
  352. if (!p->cell1)
  353. return -ENOMEM;
  354. }
  355. if (!p->cell2) {
  356. p->cell2 = alloc_prison_cell(cache);
  357. if (!p->cell2)
  358. return -ENOMEM;
  359. }
  360. return 0;
  361. }
  362. static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
  363. {
  364. if (p->cell2)
  365. free_prison_cell(cache, p->cell2);
  366. if (p->cell1)
  367. free_prison_cell(cache, p->cell1);
  368. if (p->mg)
  369. free_migration(p->mg);
  370. }
  371. static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
  372. {
  373. struct dm_cache_migration *mg = p->mg;
  374. BUG_ON(!mg);
  375. p->mg = NULL;
  376. return mg;
  377. }
  378. /*
  379. * You must have a cell within the prealloc struct to return. If not this
  380. * function will BUG() rather than returning NULL.
  381. */
  382. static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
  383. {
  384. struct dm_bio_prison_cell *r = NULL;
  385. if (p->cell1) {
  386. r = p->cell1;
  387. p->cell1 = NULL;
  388. } else if (p->cell2) {
  389. r = p->cell2;
  390. p->cell2 = NULL;
  391. } else
  392. BUG();
  393. return r;
  394. }
  395. /*
  396. * You can't have more than two cells in a prealloc struct. BUG() will be
  397. * called if you try and overfill.
  398. */
  399. static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
  400. {
  401. if (!p->cell2)
  402. p->cell2 = cell;
  403. else if (!p->cell1)
  404. p->cell1 = cell;
  405. else
  406. BUG();
  407. }
  408. /*----------------------------------------------------------------*/
  409. static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key *key)
  410. {
  411. key->virtual = 0;
  412. key->dev = 0;
  413. key->block_begin = from_oblock(begin);
  414. key->block_end = from_oblock(end);
  415. }
  416. /*
  417. * The caller hands in a preallocated cell, and a free function for it.
  418. * The cell will be freed if there's an error, or if it wasn't used because
  419. * a cell with that key already exists.
  420. */
  421. typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
  422. static int bio_detain_range(struct cache *cache, dm_oblock_t oblock_begin, dm_oblock_t oblock_end,
  423. struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
  424. cell_free_fn free_fn, void *free_context,
  425. struct dm_bio_prison_cell **cell_result)
  426. {
  427. int r;
  428. struct dm_cell_key key;
  429. build_key(oblock_begin, oblock_end, &key);
  430. r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
  431. if (r)
  432. free_fn(free_context, cell_prealloc);
  433. return r;
  434. }
  435. static int bio_detain(struct cache *cache, dm_oblock_t oblock,
  436. struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
  437. cell_free_fn free_fn, void *free_context,
  438. struct dm_bio_prison_cell **cell_result)
  439. {
  440. dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
  441. return bio_detain_range(cache, oblock, end, bio,
  442. cell_prealloc, free_fn, free_context, cell_result);
  443. }
  444. static int get_cell(struct cache *cache,
  445. dm_oblock_t oblock,
  446. struct prealloc *structs,
  447. struct dm_bio_prison_cell **cell_result)
  448. {
  449. int r;
  450. struct dm_cell_key key;
  451. struct dm_bio_prison_cell *cell_prealloc;
  452. cell_prealloc = prealloc_get_cell(structs);
  453. build_key(oblock, to_oblock(from_oblock(oblock) + 1ULL), &key);
  454. r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
  455. if (r)
  456. prealloc_put_cell(structs, cell_prealloc);
  457. return r;
  458. }
  459. /*----------------------------------------------------------------*/
  460. static bool is_dirty(struct cache *cache, dm_cblock_t b)
  461. {
  462. return test_bit(from_cblock(b), cache->dirty_bitset);
  463. }
  464. static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
  465. {
  466. if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
  467. atomic_inc(&cache->nr_dirty);
  468. policy_set_dirty(cache->policy, oblock);
  469. }
  470. }
  471. static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
  472. {
  473. if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
  474. policy_clear_dirty(cache->policy, oblock);
  475. if (atomic_dec_return(&cache->nr_dirty) == 0)
  476. dm_table_event(cache->ti->table);
  477. }
  478. }
  479. /*----------------------------------------------------------------*/
  480. static bool block_size_is_power_of_two(struct cache *cache)
  481. {
  482. return cache->sectors_per_block_shift >= 0;
  483. }
  484. /* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
  485. #if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
  486. __always_inline
  487. #endif
  488. static dm_block_t block_div(dm_block_t b, uint32_t n)
  489. {
  490. do_div(b, n);
  491. return b;
  492. }
  493. static dm_block_t oblocks_per_dblock(struct cache *cache)
  494. {
  495. dm_block_t oblocks = cache->discard_block_size;
  496. if (block_size_is_power_of_two(cache))
  497. oblocks >>= cache->sectors_per_block_shift;
  498. else
  499. oblocks = block_div(oblocks, cache->sectors_per_block);
  500. return oblocks;
  501. }
  502. static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
  503. {
  504. return to_dblock(block_div(from_oblock(oblock),
  505. oblocks_per_dblock(cache)));
  506. }
  507. static dm_oblock_t dblock_to_oblock(struct cache *cache, dm_dblock_t dblock)
  508. {
  509. return to_oblock(from_dblock(dblock) * oblocks_per_dblock(cache));
  510. }
  511. static void set_discard(struct cache *cache, dm_dblock_t b)
  512. {
  513. unsigned long flags;
  514. BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
  515. atomic_inc(&cache->stats.discard_count);
  516. spin_lock_irqsave(&cache->lock, flags);
  517. set_bit(from_dblock(b), cache->discard_bitset);
  518. spin_unlock_irqrestore(&cache->lock, flags);
  519. }
  520. static void clear_discard(struct cache *cache, dm_dblock_t b)
  521. {
  522. unsigned long flags;
  523. spin_lock_irqsave(&cache->lock, flags);
  524. clear_bit(from_dblock(b), cache->discard_bitset);
  525. spin_unlock_irqrestore(&cache->lock, flags);
  526. }
  527. static bool is_discarded(struct cache *cache, dm_dblock_t b)
  528. {
  529. int r;
  530. unsigned long flags;
  531. spin_lock_irqsave(&cache->lock, flags);
  532. r = test_bit(from_dblock(b), cache->discard_bitset);
  533. spin_unlock_irqrestore(&cache->lock, flags);
  534. return r;
  535. }
  536. static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
  537. {
  538. int r;
  539. unsigned long flags;
  540. spin_lock_irqsave(&cache->lock, flags);
  541. r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
  542. cache->discard_bitset);
  543. spin_unlock_irqrestore(&cache->lock, flags);
  544. return r;
  545. }
  546. /*----------------------------------------------------------------*/
  547. static void load_stats(struct cache *cache)
  548. {
  549. struct dm_cache_statistics stats;
  550. dm_cache_metadata_get_stats(cache->cmd, &stats);
  551. atomic_set(&cache->stats.read_hit, stats.read_hits);
  552. atomic_set(&cache->stats.read_miss, stats.read_misses);
  553. atomic_set(&cache->stats.write_hit, stats.write_hits);
  554. atomic_set(&cache->stats.write_miss, stats.write_misses);
  555. }
  556. static void save_stats(struct cache *cache)
  557. {
  558. struct dm_cache_statistics stats;
  559. if (get_cache_mode(cache) >= CM_READ_ONLY)
  560. return;
  561. stats.read_hits = atomic_read(&cache->stats.read_hit);
  562. stats.read_misses = atomic_read(&cache->stats.read_miss);
  563. stats.write_hits = atomic_read(&cache->stats.write_hit);
  564. stats.write_misses = atomic_read(&cache->stats.write_miss);
  565. dm_cache_metadata_set_stats(cache->cmd, &stats);
  566. }
  567. /*----------------------------------------------------------------
  568. * Per bio data
  569. *--------------------------------------------------------------*/
  570. /*
  571. * If using writeback, leave out struct per_bio_data's writethrough fields.
  572. */
  573. #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
  574. #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
  575. static bool writethrough_mode(struct cache_features *f)
  576. {
  577. return f->io_mode == CM_IO_WRITETHROUGH;
  578. }
  579. static bool writeback_mode(struct cache_features *f)
  580. {
  581. return f->io_mode == CM_IO_WRITEBACK;
  582. }
  583. static bool passthrough_mode(struct cache_features *f)
  584. {
  585. return f->io_mode == CM_IO_PASSTHROUGH;
  586. }
  587. static size_t get_per_bio_data_size(struct cache *cache)
  588. {
  589. return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
  590. }
  591. static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
  592. {
  593. struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
  594. BUG_ON(!pb);
  595. return pb;
  596. }
  597. static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
  598. {
  599. struct per_bio_data *pb = get_per_bio_data(bio, data_size);
  600. pb->tick = false;
  601. pb->req_nr = dm_bio_get_target_bio_nr(bio);
  602. pb->all_io_entry = NULL;
  603. pb->len = 0;
  604. return pb;
  605. }
  606. /*----------------------------------------------------------------
  607. * Remapping
  608. *--------------------------------------------------------------*/
  609. static void remap_to_origin(struct cache *cache, struct bio *bio)
  610. {
  611. bio->bi_bdev = cache->origin_dev->bdev;
  612. }
  613. static void remap_to_cache(struct cache *cache, struct bio *bio,
  614. dm_cblock_t cblock)
  615. {
  616. sector_t bi_sector = bio->bi_iter.bi_sector;
  617. sector_t block = from_cblock(cblock);
  618. bio->bi_bdev = cache->cache_dev->bdev;
  619. if (!block_size_is_power_of_two(cache))
  620. bio->bi_iter.bi_sector =
  621. (block * cache->sectors_per_block) +
  622. sector_div(bi_sector, cache->sectors_per_block);
  623. else
  624. bio->bi_iter.bi_sector =
  625. (block << cache->sectors_per_block_shift) |
  626. (bi_sector & (cache->sectors_per_block - 1));
  627. }
  628. static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
  629. {
  630. unsigned long flags;
  631. size_t pb_data_size = get_per_bio_data_size(cache);
  632. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  633. spin_lock_irqsave(&cache->lock, flags);
  634. if (cache->need_tick_bio &&
  635. !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
  636. pb->tick = true;
  637. cache->need_tick_bio = false;
  638. }
  639. spin_unlock_irqrestore(&cache->lock, flags);
  640. }
  641. static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
  642. dm_oblock_t oblock)
  643. {
  644. check_if_tick_bio_needed(cache, bio);
  645. remap_to_origin(cache, bio);
  646. if (bio_data_dir(bio) == WRITE)
  647. clear_discard(cache, oblock_to_dblock(cache, oblock));
  648. }
  649. static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
  650. dm_oblock_t oblock, dm_cblock_t cblock)
  651. {
  652. check_if_tick_bio_needed(cache, bio);
  653. remap_to_cache(cache, bio, cblock);
  654. if (bio_data_dir(bio) == WRITE) {
  655. set_dirty(cache, oblock, cblock);
  656. clear_discard(cache, oblock_to_dblock(cache, oblock));
  657. }
  658. }
  659. static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
  660. {
  661. sector_t block_nr = bio->bi_iter.bi_sector;
  662. if (!block_size_is_power_of_two(cache))
  663. (void) sector_div(block_nr, cache->sectors_per_block);
  664. else
  665. block_nr >>= cache->sectors_per_block_shift;
  666. return to_oblock(block_nr);
  667. }
  668. static int bio_triggers_commit(struct cache *cache, struct bio *bio)
  669. {
  670. return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
  671. }
  672. /*
  673. * You must increment the deferred set whilst the prison cell is held. To
  674. * encourage this, we ask for 'cell' to be passed in.
  675. */
  676. static void inc_ds(struct cache *cache, struct bio *bio,
  677. struct dm_bio_prison_cell *cell)
  678. {
  679. size_t pb_data_size = get_per_bio_data_size(cache);
  680. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  681. BUG_ON(!cell);
  682. BUG_ON(pb->all_io_entry);
  683. pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
  684. }
  685. static bool accountable_bio(struct cache *cache, struct bio *bio)
  686. {
  687. return ((bio->bi_bdev == cache->origin_dev->bdev) &&
  688. !(bio->bi_rw & REQ_DISCARD));
  689. }
  690. static void accounted_begin(struct cache *cache, struct bio *bio)
  691. {
  692. size_t pb_data_size = get_per_bio_data_size(cache);
  693. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  694. if (accountable_bio(cache, bio)) {
  695. pb->len = bio_sectors(bio);
  696. iot_io_begin(&cache->origin_tracker, pb->len);
  697. }
  698. }
  699. static void accounted_complete(struct cache *cache, struct bio *bio)
  700. {
  701. size_t pb_data_size = get_per_bio_data_size(cache);
  702. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  703. iot_io_end(&cache->origin_tracker, pb->len);
  704. }
  705. static void accounted_request(struct cache *cache, struct bio *bio)
  706. {
  707. accounted_begin(cache, bio);
  708. generic_make_request(bio);
  709. }
  710. static void issue(struct cache *cache, struct bio *bio)
  711. {
  712. unsigned long flags;
  713. if (!bio_triggers_commit(cache, bio)) {
  714. accounted_request(cache, bio);
  715. return;
  716. }
  717. /*
  718. * Batch together any bios that trigger commits and then issue a
  719. * single commit for them in do_worker().
  720. */
  721. spin_lock_irqsave(&cache->lock, flags);
  722. cache->commit_requested = true;
  723. bio_list_add(&cache->deferred_flush_bios, bio);
  724. spin_unlock_irqrestore(&cache->lock, flags);
  725. }
  726. static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell)
  727. {
  728. inc_ds(cache, bio, cell);
  729. issue(cache, bio);
  730. }
  731. static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
  732. {
  733. unsigned long flags;
  734. spin_lock_irqsave(&cache->lock, flags);
  735. bio_list_add(&cache->deferred_writethrough_bios, bio);
  736. spin_unlock_irqrestore(&cache->lock, flags);
  737. wake_worker(cache);
  738. }
  739. static void writethrough_endio(struct bio *bio)
  740. {
  741. struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
  742. dm_unhook_bio(&pb->hook_info, bio);
  743. if (bio->bi_error) {
  744. bio_endio(bio);
  745. return;
  746. }
  747. dm_bio_restore(&pb->bio_details, bio);
  748. remap_to_cache(pb->cache, bio, pb->cblock);
  749. /*
  750. * We can't issue this bio directly, since we're in interrupt
  751. * context. So it gets put on a bio list for processing by the
  752. * worker thread.
  753. */
  754. defer_writethrough_bio(pb->cache, bio);
  755. }
  756. /*
  757. * When running in writethrough mode we need to send writes to clean blocks
  758. * to both the cache and origin devices. In future we'd like to clone the
  759. * bio and send them in parallel, but for now we're doing them in
  760. * series as this is easier.
  761. */
  762. static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
  763. dm_oblock_t oblock, dm_cblock_t cblock)
  764. {
  765. struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
  766. pb->cache = cache;
  767. pb->cblock = cblock;
  768. dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
  769. dm_bio_record(&pb->bio_details, bio);
  770. remap_to_origin_clear_discard(pb->cache, bio, oblock);
  771. }
  772. /*----------------------------------------------------------------
  773. * Failure modes
  774. *--------------------------------------------------------------*/
  775. static enum cache_metadata_mode get_cache_mode(struct cache *cache)
  776. {
  777. return cache->features.mode;
  778. }
  779. static const char *cache_device_name(struct cache *cache)
  780. {
  781. return dm_device_name(dm_table_get_md(cache->ti->table));
  782. }
  783. static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
  784. {
  785. const char *descs[] = {
  786. "write",
  787. "read-only",
  788. "fail"
  789. };
  790. dm_table_event(cache->ti->table);
  791. DMINFO("%s: switching cache to %s mode",
  792. cache_device_name(cache), descs[(int)mode]);
  793. }
  794. static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
  795. {
  796. bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
  797. enum cache_metadata_mode old_mode = get_cache_mode(cache);
  798. if (new_mode == CM_WRITE && needs_check) {
  799. DMERR("%s: unable to switch cache to write mode until repaired.",
  800. cache_device_name(cache));
  801. if (old_mode != new_mode)
  802. new_mode = old_mode;
  803. else
  804. new_mode = CM_READ_ONLY;
  805. }
  806. /* Never move out of fail mode */
  807. if (old_mode == CM_FAIL)
  808. new_mode = CM_FAIL;
  809. switch (new_mode) {
  810. case CM_FAIL:
  811. case CM_READ_ONLY:
  812. dm_cache_metadata_set_read_only(cache->cmd);
  813. break;
  814. case CM_WRITE:
  815. dm_cache_metadata_set_read_write(cache->cmd);
  816. break;
  817. }
  818. cache->features.mode = new_mode;
  819. if (new_mode != old_mode)
  820. notify_mode_switch(cache, new_mode);
  821. }
  822. static void abort_transaction(struct cache *cache)
  823. {
  824. const char *dev_name = cache_device_name(cache);
  825. if (get_cache_mode(cache) >= CM_READ_ONLY)
  826. return;
  827. if (dm_cache_metadata_set_needs_check(cache->cmd)) {
  828. DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
  829. set_cache_mode(cache, CM_FAIL);
  830. }
  831. DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
  832. if (dm_cache_metadata_abort(cache->cmd)) {
  833. DMERR("%s: failed to abort metadata transaction", dev_name);
  834. set_cache_mode(cache, CM_FAIL);
  835. }
  836. }
  837. static void metadata_operation_failed(struct cache *cache, const char *op, int r)
  838. {
  839. DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
  840. cache_device_name(cache), op, r);
  841. abort_transaction(cache);
  842. set_cache_mode(cache, CM_READ_ONLY);
  843. }
  844. /*----------------------------------------------------------------
  845. * Migration processing
  846. *
  847. * Migration covers moving data from the origin device to the cache, or
  848. * vice versa.
  849. *--------------------------------------------------------------*/
  850. static void inc_io_migrations(struct cache *cache)
  851. {
  852. atomic_inc(&cache->nr_io_migrations);
  853. }
  854. static void dec_io_migrations(struct cache *cache)
  855. {
  856. atomic_dec(&cache->nr_io_migrations);
  857. }
  858. static bool discard_or_flush(struct bio *bio)
  859. {
  860. return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
  861. }
  862. static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
  863. {
  864. if (discard_or_flush(cell->holder)) {
  865. /*
  866. * We have to handle these bios individually.
  867. */
  868. dm_cell_release(cache->prison, cell, &cache->deferred_bios);
  869. free_prison_cell(cache, cell);
  870. } else
  871. list_add_tail(&cell->user_list, &cache->deferred_cells);
  872. }
  873. static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder)
  874. {
  875. unsigned long flags;
  876. if (!holder && dm_cell_promote_or_release(cache->prison, cell)) {
  877. /*
  878. * There was no prisoner to promote to holder, the
  879. * cell has been released.
  880. */
  881. free_prison_cell(cache, cell);
  882. return;
  883. }
  884. spin_lock_irqsave(&cache->lock, flags);
  885. __cell_defer(cache, cell);
  886. spin_unlock_irqrestore(&cache->lock, flags);
  887. wake_worker(cache);
  888. }
  889. static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err)
  890. {
  891. dm_cell_error(cache->prison, cell, err);
  892. free_prison_cell(cache, cell);
  893. }
  894. static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
  895. {
  896. cell_error_with_code(cache, cell, DM_ENDIO_REQUEUE);
  897. }
  898. static void free_io_migration(struct dm_cache_migration *mg)
  899. {
  900. struct cache *cache = mg->cache;
  901. dec_io_migrations(cache);
  902. free_migration(mg);
  903. wake_worker(cache);
  904. }
  905. static void migration_failure(struct dm_cache_migration *mg)
  906. {
  907. struct cache *cache = mg->cache;
  908. const char *dev_name = cache_device_name(cache);
  909. if (mg->writeback) {
  910. DMERR_LIMIT("%s: writeback failed; couldn't copy block", dev_name);
  911. set_dirty(cache, mg->old_oblock, mg->cblock);
  912. cell_defer(cache, mg->old_ocell, false);
  913. } else if (mg->demote) {
  914. DMERR_LIMIT("%s: demotion failed; couldn't copy block", dev_name);
  915. policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
  916. cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
  917. if (mg->promote)
  918. cell_defer(cache, mg->new_ocell, true);
  919. } else {
  920. DMERR_LIMIT("%s: promotion failed; couldn't copy block", dev_name);
  921. policy_remove_mapping(cache->policy, mg->new_oblock);
  922. cell_defer(cache, mg->new_ocell, true);
  923. }
  924. free_io_migration(mg);
  925. }
  926. static void migration_success_pre_commit(struct dm_cache_migration *mg)
  927. {
  928. int r;
  929. unsigned long flags;
  930. struct cache *cache = mg->cache;
  931. if (mg->writeback) {
  932. clear_dirty(cache, mg->old_oblock, mg->cblock);
  933. cell_defer(cache, mg->old_ocell, false);
  934. free_io_migration(mg);
  935. return;
  936. } else if (mg->demote) {
  937. r = dm_cache_remove_mapping(cache->cmd, mg->cblock);
  938. if (r) {
  939. DMERR_LIMIT("%s: demotion failed; couldn't update on disk metadata",
  940. cache_device_name(cache));
  941. metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
  942. policy_force_mapping(cache->policy, mg->new_oblock,
  943. mg->old_oblock);
  944. if (mg->promote)
  945. cell_defer(cache, mg->new_ocell, true);
  946. free_io_migration(mg);
  947. return;
  948. }
  949. } else {
  950. r = dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock);
  951. if (r) {
  952. DMERR_LIMIT("%s: promotion failed; couldn't update on disk metadata",
  953. cache_device_name(cache));
  954. metadata_operation_failed(cache, "dm_cache_insert_mapping", r);
  955. policy_remove_mapping(cache->policy, mg->new_oblock);
  956. free_io_migration(mg);
  957. return;
  958. }
  959. }
  960. spin_lock_irqsave(&cache->lock, flags);
  961. list_add_tail(&mg->list, &cache->need_commit_migrations);
  962. cache->commit_requested = true;
  963. spin_unlock_irqrestore(&cache->lock, flags);
  964. }
  965. static void migration_success_post_commit(struct dm_cache_migration *mg)
  966. {
  967. unsigned long flags;
  968. struct cache *cache = mg->cache;
  969. if (mg->writeback) {
  970. DMWARN_LIMIT("%s: writeback unexpectedly triggered commit",
  971. cache_device_name(cache));
  972. return;
  973. } else if (mg->demote) {
  974. cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
  975. if (mg->promote) {
  976. mg->demote = false;
  977. spin_lock_irqsave(&cache->lock, flags);
  978. list_add_tail(&mg->list, &cache->quiesced_migrations);
  979. spin_unlock_irqrestore(&cache->lock, flags);
  980. } else {
  981. if (mg->invalidate)
  982. policy_remove_mapping(cache->policy, mg->old_oblock);
  983. free_io_migration(mg);
  984. }
  985. } else {
  986. if (mg->requeue_holder) {
  987. clear_dirty(cache, mg->new_oblock, mg->cblock);
  988. cell_defer(cache, mg->new_ocell, true);
  989. } else {
  990. /*
  991. * The block was promoted via an overwrite, so it's dirty.
  992. */
  993. set_dirty(cache, mg->new_oblock, mg->cblock);
  994. bio_endio(mg->new_ocell->holder);
  995. cell_defer(cache, mg->new_ocell, false);
  996. }
  997. free_io_migration(mg);
  998. }
  999. }
  1000. static void copy_complete(int read_err, unsigned long write_err, void *context)
  1001. {
  1002. unsigned long flags;
  1003. struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
  1004. struct cache *cache = mg->cache;
  1005. if (read_err || write_err)
  1006. mg->err = true;
  1007. spin_lock_irqsave(&cache->lock, flags);
  1008. list_add_tail(&mg->list, &cache->completed_migrations);
  1009. spin_unlock_irqrestore(&cache->lock, flags);
  1010. wake_worker(cache);
  1011. }
  1012. static void issue_copy(struct dm_cache_migration *mg)
  1013. {
  1014. int r;
  1015. struct dm_io_region o_region, c_region;
  1016. struct cache *cache = mg->cache;
  1017. sector_t cblock = from_cblock(mg->cblock);
  1018. o_region.bdev = cache->origin_dev->bdev;
  1019. o_region.count = cache->sectors_per_block;
  1020. c_region.bdev = cache->cache_dev->bdev;
  1021. c_region.sector = cblock * cache->sectors_per_block;
  1022. c_region.count = cache->sectors_per_block;
  1023. if (mg->writeback || mg->demote) {
  1024. /* demote */
  1025. o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
  1026. r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
  1027. } else {
  1028. /* promote */
  1029. o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
  1030. r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
  1031. }
  1032. if (r < 0) {
  1033. DMERR_LIMIT("%s: issuing migration failed", cache_device_name(cache));
  1034. migration_failure(mg);
  1035. }
  1036. }
  1037. static void overwrite_endio(struct bio *bio)
  1038. {
  1039. struct dm_cache_migration *mg = bio->bi_private;
  1040. struct cache *cache = mg->cache;
  1041. size_t pb_data_size = get_per_bio_data_size(cache);
  1042. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  1043. unsigned long flags;
  1044. dm_unhook_bio(&pb->hook_info, bio);
  1045. if (bio->bi_error)
  1046. mg->err = true;
  1047. mg->requeue_holder = false;
  1048. spin_lock_irqsave(&cache->lock, flags);
  1049. list_add_tail(&mg->list, &cache->completed_migrations);
  1050. spin_unlock_irqrestore(&cache->lock, flags);
  1051. wake_worker(cache);
  1052. }
  1053. static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
  1054. {
  1055. size_t pb_data_size = get_per_bio_data_size(mg->cache);
  1056. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  1057. dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
  1058. remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
  1059. /*
  1060. * No need to inc_ds() here, since the cell will be held for the
  1061. * duration of the io.
  1062. */
  1063. accounted_request(mg->cache, bio);
  1064. }
  1065. static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
  1066. {
  1067. return (bio_data_dir(bio) == WRITE) &&
  1068. (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
  1069. }
  1070. static void avoid_copy(struct dm_cache_migration *mg)
  1071. {
  1072. atomic_inc(&mg->cache->stats.copies_avoided);
  1073. migration_success_pre_commit(mg);
  1074. }
  1075. static void calc_discard_block_range(struct cache *cache, struct bio *bio,
  1076. dm_dblock_t *b, dm_dblock_t *e)
  1077. {
  1078. sector_t sb = bio->bi_iter.bi_sector;
  1079. sector_t se = bio_end_sector(bio);
  1080. *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));
  1081. if (se - sb < cache->discard_block_size)
  1082. *e = *b;
  1083. else
  1084. *e = to_dblock(block_div(se, cache->discard_block_size));
  1085. }
  1086. static void issue_discard(struct dm_cache_migration *mg)
  1087. {
  1088. dm_dblock_t b, e;
  1089. struct bio *bio = mg->new_ocell->holder;
  1090. struct cache *cache = mg->cache;
  1091. calc_discard_block_range(cache, bio, &b, &e);
  1092. while (b != e) {
  1093. set_discard(cache, b);
  1094. b = to_dblock(from_dblock(b) + 1);
  1095. }
  1096. bio_endio(bio);
  1097. cell_defer(cache, mg->new_ocell, false);
  1098. free_migration(mg);
  1099. wake_worker(cache);
  1100. }
  1101. static void issue_copy_or_discard(struct dm_cache_migration *mg)
  1102. {
  1103. bool avoid;
  1104. struct cache *cache = mg->cache;
  1105. if (mg->discard) {
  1106. issue_discard(mg);
  1107. return;
  1108. }
  1109. if (mg->writeback || mg->demote)
  1110. avoid = !is_dirty(cache, mg->cblock) ||
  1111. is_discarded_oblock(cache, mg->old_oblock);
  1112. else {
  1113. struct bio *bio = mg->new_ocell->holder;
  1114. avoid = is_discarded_oblock(cache, mg->new_oblock);
  1115. if (writeback_mode(&cache->features) &&
  1116. !avoid && bio_writes_complete_block(cache, bio)) {
  1117. issue_overwrite(mg, bio);
  1118. return;
  1119. }
  1120. }
  1121. avoid ? avoid_copy(mg) : issue_copy(mg);
  1122. }
  1123. static void complete_migration(struct dm_cache_migration *mg)
  1124. {
  1125. if (mg->err)
  1126. migration_failure(mg);
  1127. else
  1128. migration_success_pre_commit(mg);
  1129. }
  1130. static void process_migrations(struct cache *cache, struct list_head *head,
  1131. void (*fn)(struct dm_cache_migration *))
  1132. {
  1133. unsigned long flags;
  1134. struct list_head list;
  1135. struct dm_cache_migration *mg, *tmp;
  1136. INIT_LIST_HEAD(&list);
  1137. spin_lock_irqsave(&cache->lock, flags);
  1138. list_splice_init(head, &list);
  1139. spin_unlock_irqrestore(&cache->lock, flags);
  1140. list_for_each_entry_safe(mg, tmp, &list, list)
  1141. fn(mg);
  1142. }
static void __queue_quiesced_migration(struct dm_cache_migration *mg)
{
	list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
}

static void queue_quiesced_migration(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	spin_lock_irqsave(&cache->lock, flags);
	__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
{
	unsigned long flags;
	struct dm_cache_migration *mg, *tmp;

	spin_lock_irqsave(&cache->lock, flags);
	list_for_each_entry_safe(mg, tmp, work, list)
		__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void check_for_quiesced_migrations(struct cache *cache,
					  struct per_bio_data *pb)
{
	struct list_head work;

	if (!pb->all_io_entry)
		return;

	INIT_LIST_HEAD(&work);
	dm_deferred_entry_dec(pb->all_io_entry, &work);

	if (!list_empty(&work))
		queue_quiesced_migrations(cache, &work);
}

static void quiesce_migration(struct dm_cache_migration *mg)
{
	if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
		queue_quiesced_migration(mg);
}
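
/*
 * The constructors below fill in a dm_cache_migration for each kind of
 * background operation (promote, writeback, demote+promote, invalidate,
 * discard) and hand it to quiesce_migration().
 */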
static void promote(struct cache *cache, struct prealloc *structs,
		    dm_oblock_t oblock, dm_cblock_t cblock,
		    struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = false;
	mg->writeback = false;
	mg->demote = false;
	mg->promote = true;
	mg->requeue_holder = true;
	mg->invalidate = false;
	mg->cache = cache;
	mg->new_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = NULL;
	mg->new_ocell = cell;
	mg->start_jiffies = jiffies;

	inc_io_migrations(cache);
	quiesce_migration(mg);
}

static void writeback(struct cache *cache, struct prealloc *structs,
		      dm_oblock_t oblock, dm_cblock_t cblock,
		      struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = false;
	mg->writeback = true;
	mg->demote = false;
	mg->promote = false;
	mg->requeue_holder = true;
	mg->invalidate = false;
	mg->cache = cache;
	mg->old_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = cell;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;

	inc_io_migrations(cache);
	quiesce_migration(mg);
}

static void demote_then_promote(struct cache *cache, struct prealloc *structs,
				dm_oblock_t old_oblock, dm_oblock_t new_oblock,
				dm_cblock_t cblock,
				struct dm_bio_prison_cell *old_ocell,
				struct dm_bio_prison_cell *new_ocell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = false;
	mg->writeback = false;
	mg->demote = true;
	mg->promote = true;
	mg->requeue_holder = true;
	mg->invalidate = false;
	mg->cache = cache;
	mg->old_oblock = old_oblock;
	mg->new_oblock = new_oblock;
	mg->cblock = cblock;
	mg->old_ocell = old_ocell;
	mg->new_ocell = new_ocell;
	mg->start_jiffies = jiffies;

	inc_io_migrations(cache);
	quiesce_migration(mg);
}

/*
 * Invalidate a cache entry.  No writeback occurs; any changes in the cache
 * block are thrown away.
 */
static void invalidate(struct cache *cache, struct prealloc *structs,
		       dm_oblock_t oblock, dm_cblock_t cblock,
		       struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = false;
	mg->writeback = false;
	mg->demote = true;
	mg->promote = false;
	mg->requeue_holder = true;
	mg->invalidate = true;
	mg->cache = cache;
	mg->old_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = cell;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;

	inc_io_migrations(cache);
	quiesce_migration(mg);
}

static void discard(struct cache *cache, struct prealloc *structs,
		    struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = true;
	mg->writeback = false;
	mg->demote = false;
	mg->promote = false;
	mg->requeue_holder = false;
	mg->invalidate = false;
	mg->cache = cache;
	mg->old_ocell = NULL;
	mg->new_ocell = cell;
	mg->start_jiffies = jiffies;

	quiesce_migration(mg);
}

/*----------------------------------------------------------------
 * bio processing
 *--------------------------------------------------------------*/
static void defer_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void process_flush_bio(struct cache *cache, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	BUG_ON(bio->bi_iter.bi_size);
	if (!pb->req_nr)
		remap_to_origin(cache, bio);
	else
		remap_to_cache(cache, bio, 0);

	/*
	 * REQ_FLUSH is not directed at any particular block so we don't
	 * need to inc_ds().  REQ_FUA's are split into a write + REQ_FLUSH
	 * by dm-core.
	 */
	issue(cache, bio);
}

static void process_discard_bio(struct cache *cache, struct prealloc *structs,
				struct bio *bio)
{
	int r;
	dm_dblock_t b, e;
	struct dm_bio_prison_cell *cell_prealloc, *new_ocell;

	calc_discard_block_range(cache, bio, &b, &e);
	if (b == e) {
		bio_endio(bio);
		return;
	}

	cell_prealloc = prealloc_get_cell(structs);
	r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prealloc,
			     (cell_free_fn) prealloc_put_cell,
			     structs, &new_ocell);
	if (r > 0)
		return;

	discard(cache, structs, new_ocell);
}
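
/*
 * migration_threshold is expressed in sectors; only allow a new copy to
 * start while the volume of data currently being migrated stays below
 * that limit.
 */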
static bool spare_migration_bandwidth(struct cache *cache)
{
	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
		cache->sectors_per_block;

	return current_volume < cache->migration_threshold;
}

static void inc_hit_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_hit : &cache->stats.write_hit);
}

static void inc_miss_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_miss : &cache->stats.write_miss);
}

/*----------------------------------------------------------------*/

struct inc_detail {
	struct cache *cache;
	struct bio_list bios_for_issue;
	struct bio_list unhandled_bios;
	bool any_writes;
};

static void inc_fn(void *context, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct inc_detail *detail = context;
	struct cache *cache = detail->cache;

	inc_ds(cache, cell->holder, cell);
	if (bio_data_dir(cell->holder) == WRITE)
		detail->any_writes = true;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (discard_or_flush(bio)) {
			bio_list_add(&detail->unhandled_bios, bio);
			continue;
		}

		if (bio_data_dir(bio) == WRITE)
			detail->any_writes = true;

		bio_list_add(&detail->bios_for_issue, bio);
		inc_ds(cache, bio, cell);
	}
}

// FIXME: refactor these two
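/*
 * Release a cell and remap its bios to the origin: the holder and any
 * queued bios are issued against the origin device, discards/flushes are
 * pushed back onto the deferred list, and any write clears the discard
 * flag for the block.
 */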
static void remap_cell_to_origin_clear_discard(struct cache *cache,
					       struct dm_bio_prison_cell *cell,
					       dm_oblock_t oblock, bool issue_holder)
{
	struct bio *bio;
	unsigned long flags;
	struct inc_detail detail;

	detail.cache = cache;
	bio_list_init(&detail.bios_for_issue);
	bio_list_init(&detail.unhandled_bios);
	detail.any_writes = false;

	spin_lock_irqsave(&cache->lock, flags);
	dm_cell_visit_release(cache->prison, inc_fn, &detail, cell);
	bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	remap_to_origin(cache, cell->holder);
	if (issue_holder)
		issue(cache, cell->holder);
	else
		accounted_begin(cache, cell->holder);

	if (detail.any_writes)
		clear_discard(cache, oblock_to_dblock(cache, oblock));

	while ((bio = bio_list_pop(&detail.bios_for_issue))) {
		remap_to_origin(cache, bio);
		issue(cache, bio);
	}

	free_prison_cell(cache, cell);
}

static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell,
				      dm_oblock_t oblock, dm_cblock_t cblock, bool issue_holder)
{
	struct bio *bio;
	unsigned long flags;
	struct inc_detail detail;

	detail.cache = cache;
	bio_list_init(&detail.bios_for_issue);
	bio_list_init(&detail.unhandled_bios);
	detail.any_writes = false;

	spin_lock_irqsave(&cache->lock, flags);
	dm_cell_visit_release(cache->prison, inc_fn, &detail, cell);
	bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	remap_to_cache(cache, cell->holder, cblock);
	if (issue_holder)
		issue(cache, cell->holder);
	else
		accounted_begin(cache, cell->holder);

	if (detail.any_writes) {
		set_dirty(cache, oblock, cblock);
		clear_discard(cache, oblock_to_dblock(cache, oblock));
	}

	while ((bio = bio_list_pop(&detail.bios_for_issue))) {
		remap_to_cache(cache, bio, cblock);
		issue(cache, bio);
	}

	free_prison_cell(cache, cell);
}

/*----------------------------------------------------------------*/

struct old_oblock_lock {
	struct policy_locker locker;
	struct cache *cache;
	struct prealloc *structs;
	struct dm_bio_prison_cell *cell;
};

static int null_locker(struct policy_locker *locker, dm_oblock_t b)
{
	/* This should never be called */
	BUG();
	return 0;
}

static int cell_locker(struct policy_locker *locker, dm_oblock_t b)
{
	struct old_oblock_lock *l = container_of(locker, struct old_oblock_lock, locker);
	struct dm_bio_prison_cell *cell_prealloc = prealloc_get_cell(l->structs);

	return bio_detain(l->cache, b, NULL, cell_prealloc,
			  (cell_free_fn) prealloc_put_cell,
			  l->structs, &l->cell);
}
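
/*
 * Work out what to do with the bio held in a cell by asking the policy to
 * map its block, then act on the result: service a hit or miss in place,
 * promote a new block, or demote an old mapping and promote in its place.
 */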
static void process_cell(struct cache *cache, struct prealloc *structs,
			 struct dm_bio_prison_cell *new_ocell)
{
	int r;
	bool release_cell = true;
	struct bio *bio = new_ocell->holder;
	dm_oblock_t block = get_bio_block(cache, bio);
	struct policy_result lookup_result;
	bool passthrough = passthrough_mode(&cache->features);
	bool fast_promotion, can_migrate;
	struct old_oblock_lock ool;

	fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);
	can_migrate = !passthrough && (fast_promotion || spare_migration_bandwidth(cache));

	ool.locker.fn = cell_locker;
	ool.cache = cache;
	ool.structs = structs;
	ool.cell = NULL;
	r = policy_map(cache->policy, block, true, can_migrate, fast_promotion,
		       bio, &ool.locker, &lookup_result);

	if (r == -EWOULDBLOCK)
		/* migration has been denied */
		lookup_result.op = POLICY_MISS;

	switch (lookup_result.op) {
	case POLICY_HIT:
		if (passthrough) {
			inc_miss_counter(cache, bio);

			/*
			 * Passthrough always maps to the origin,
			 * invalidating any cache blocks that are written
			 * to.
			 */
			if (bio_data_dir(bio) == WRITE) {
				atomic_inc(&cache->stats.demotion);
				invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
				release_cell = false;

			} else {
				/* FIXME: factor out issue_origin() */
				remap_to_origin_clear_discard(cache, bio, block);
				inc_and_issue(cache, bio, new_ocell);
			}
		} else {
			inc_hit_counter(cache, bio);

			if (bio_data_dir(bio) == WRITE &&
			    writethrough_mode(&cache->features) &&
			    !is_dirty(cache, lookup_result.cblock)) {
				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
				inc_and_issue(cache, bio, new_ocell);

			} else {
				remap_cell_to_cache_dirty(cache, new_ocell, block, lookup_result.cblock, true);
				release_cell = false;
			}
		}
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		remap_cell_to_origin_clear_discard(cache, new_ocell, block, true);
		release_cell = false;
		break;

	case POLICY_NEW:
		atomic_inc(&cache->stats.promotion);
		promote(cache, structs, block, lookup_result.cblock, new_ocell);
		release_cell = false;
		break;

	case POLICY_REPLACE:
		atomic_inc(&cache->stats.demotion);
		atomic_inc(&cache->stats.promotion);
		demote_then_promote(cache, structs, lookup_result.old_oblock,
				    block, lookup_result.cblock,
				    ool.cell, new_ocell);
		release_cell = false;
		break;

	default:
		DMERR_LIMIT("%s: %s: erroring bio, unknown policy op: %u",
			    cache_device_name(cache), __func__,
			    (unsigned) lookup_result.op);
		bio_io_error(bio);
	}

	if (release_cell)
		cell_defer(cache, new_ocell, false);
}

static void process_bio(struct cache *cache, struct prealloc *structs,
			struct bio *bio)
{
	int r;
	dm_oblock_t block = get_bio_block(cache, bio);
	struct dm_bio_prison_cell *cell_prealloc, *new_ocell;

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell_prealloc = prealloc_get_cell(structs);
	r = bio_detain(cache, block, bio, cell_prealloc,
		       (cell_free_fn) prealloc_put_cell,
		       structs, &new_ocell);
	if (r > 0)
		return;

	process_cell(cache, structs, new_ocell);
}

static int need_commit_due_to_time(struct cache *cache)
{
	return jiffies < cache->last_commit_jiffies ||
	       jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 */
static int commit(struct cache *cache, bool clean_shutdown)
{
	int r;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return -EINVAL;

	atomic_inc(&cache->stats.commit_count);
	r = dm_cache_commit(cache->cmd, clean_shutdown);
	if (r)
		metadata_operation_failed(cache, "dm_cache_commit", r);

	return r;
}

static int commit_if_needed(struct cache *cache)
{
	int r = 0;

	if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
	    dm_cache_changed_this_transaction(cache->cmd)) {
		r = commit(cache, false);
		cache->commit_requested = false;
		cache->last_commit_jiffies = jiffies;
	}

	return r;
}
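
/*
 * Drain the deferred bio list built up by cache_map() and defer_bio().
 * If the migration prealloc pool runs dry the remaining bios are pushed
 * back onto the deferred list and retried on the next worker pass.
 */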
static void process_deferred_bios(struct cache *cache)
{
	bool prealloc_used = false;
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;
	struct prealloc structs;

	memset(&structs, 0, sizeof(structs));
	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while (!bio_list_empty(&bios)) {
		/*
		 * If we've got no free migration structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		prealloc_used = true;
		if (prealloc_data_structs(cache, &structs)) {
			spin_lock_irqsave(&cache->lock, flags);
			bio_list_merge(&cache->deferred_bios, &bios);
			spin_unlock_irqrestore(&cache->lock, flags);
			break;
		}

		bio = bio_list_pop(&bios);

		if (bio->bi_rw & REQ_FLUSH)
			process_flush_bio(cache, bio);
		else if (bio->bi_rw & REQ_DISCARD)
			process_discard_bio(cache, &structs, bio);
		else
			process_bio(cache, &structs, bio);
	}

	if (prealloc_used)
		prealloc_free_structs(cache, &structs);
}

static void process_deferred_cells(struct cache *cache)
{
	bool prealloc_used = false;
	unsigned long flags;
	struct dm_bio_prison_cell *cell, *tmp;
	struct list_head cells;
	struct prealloc structs;

	memset(&structs, 0, sizeof(structs));

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&cache->lock, flags);
	list_splice_init(&cache->deferred_cells, &cells);
	spin_unlock_irqrestore(&cache->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list) {
		/*
		 * If we've got no free migration structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		prealloc_used = true;
		if (prealloc_data_structs(cache, &structs)) {
			spin_lock_irqsave(&cache->lock, flags);
			list_splice(&cells, &cache->deferred_cells);
			spin_unlock_irqrestore(&cache->lock, flags);
			break;
		}

		process_cell(cache, &structs, cell);
	}

	if (prealloc_used)
		prealloc_free_structs(cache, &structs);
}

static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_flush_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	/*
	 * These bios have already been through inc_ds()
	 */
	while ((bio = bio_list_pop(&bios)))
		submit_bios ? accounted_request(cache, bio) : bio_io_error(bio);
}

static void process_deferred_writethrough_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_writethrough_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	/*
	 * These bios have already been through inc_ds()
	 */
	while ((bio = bio_list_pop(&bios)))
		accounted_request(cache, bio);
}
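
/*
 * Ask the policy for dirty blocks to clean and start a writeback for each
 * while spare migration bandwidth remains.  If a cell or migration struct
 * cannot be obtained the block is marked dirty again and retried later.
 */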
static void writeback_some_dirty_blocks(struct cache *cache)
{
	bool prealloc_used = false;
	dm_oblock_t oblock;
	dm_cblock_t cblock;
	struct prealloc structs;
	struct dm_bio_prison_cell *old_ocell;
	bool busy = !iot_idle_for(&cache->origin_tracker, HZ);

	memset(&structs, 0, sizeof(structs));

	while (spare_migration_bandwidth(cache)) {
		if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
			break; /* no work to do */

		prealloc_used = true;
		if (prealloc_data_structs(cache, &structs) ||
		    get_cell(cache, oblock, &structs, &old_ocell)) {
			policy_set_dirty(cache->policy, oblock);
			break;
		}

		writeback(cache, &structs, oblock, cblock, old_ocell);
	}

	if (prealloc_used)
		prealloc_free_structs(cache, &structs);
}

/*----------------------------------------------------------------
 * Invalidations.
 * Dropping something from the cache *without* writing back.
 *--------------------------------------------------------------*/

static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
{
	int r = 0;
	uint64_t begin = from_cblock(req->cblocks->begin);
	uint64_t end = from_cblock(req->cblocks->end);

	while (begin != end) {
		r = policy_remove_cblock(cache->policy, to_cblock(begin));
		if (!r) {
			r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
			if (r) {
				metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
				break;
			}

		} else if (r == -ENODATA) {
			/* harmless, already unmapped */
			r = 0;

		} else {
			DMERR("%s: policy_remove_cblock failed", cache_device_name(cache));
			break;
		}

		begin++;
	}

	cache->commit_requested = true;

	req->err = r;
	atomic_set(&req->complete, 1);

	wake_up(&req->result_wait);
}

static void process_invalidation_requests(struct cache *cache)
{
	struct list_head list;
	struct invalidation_request *req, *tmp;

	INIT_LIST_HEAD(&list);
	spin_lock(&cache->invalidation_lock);
	list_splice_init(&cache->invalidation_requests, &list);
	spin_unlock(&cache->invalidation_lock);

	list_for_each_entry_safe (req, tmp, &list, list)
		process_invalidation_request(cache, req);
}

/*----------------------------------------------------------------
 * Main worker loop
 *--------------------------------------------------------------*/
static bool is_quiescing(struct cache *cache)
{
	return atomic_read(&cache->quiescing);
}

static void ack_quiescing(struct cache *cache)
{
	if (is_quiescing(cache)) {
		atomic_inc(&cache->quiescing_ack);
		wake_up(&cache->quiescing_wait);
	}
}

static void wait_for_quiescing_ack(struct cache *cache)
{
	wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
}

static void start_quiescing(struct cache *cache)
{
	atomic_inc(&cache->quiescing);
	wait_for_quiescing_ack(cache);
}

static void stop_quiescing(struct cache *cache)
{
	atomic_set(&cache->quiescing, 0);
	atomic_set(&cache->quiescing_ack, 0);
}

static void wait_for_migrations(struct cache *cache)
{
	wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
}

static void stop_worker(struct cache *cache)
{
	cancel_delayed_work(&cache->waker);
	flush_workqueue(cache->wq);
}

static void requeue_deferred_cells(struct cache *cache)
{
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell, *tmp;

	INIT_LIST_HEAD(&cells);
	spin_lock_irqsave(&cache->lock, flags);
	list_splice_init(&cache->deferred_cells, &cells);
	spin_unlock_irqrestore(&cache->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list)
		cell_requeue(cache, cell);
}

static void requeue_deferred_bios(struct cache *cache)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);

	while ((bio = bio_list_pop(&bios))) {
		bio->bi_error = DM_ENDIO_REQUEUE;
		bio_endio(bio);
	}
}

static int more_work(struct cache *cache)
{
	if (is_quiescing(cache))
		return !list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations);
	else
		return !bio_list_empty(&cache->deferred_bios) ||
			!list_empty(&cache->deferred_cells) ||
			!bio_list_empty(&cache->deferred_flush_bios) ||
			!bio_list_empty(&cache->deferred_writethrough_bios) ||
			!list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations) ||
			cache->invalidate;
}
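
/*
 * The single workqueue item that drives the target: services deferred
 * bios and cells, steps migrations through their states, commits
 * metadata when required and acknowledges quiescing.  It loops while
 * more_work() reports anything outstanding.
 */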
static void do_worker(struct work_struct *ws)
{
	struct cache *cache = container_of(ws, struct cache, worker);

	do {
		if (!is_quiescing(cache)) {
			writeback_some_dirty_blocks(cache);
			process_deferred_writethrough_bios(cache);
			process_deferred_bios(cache);
			process_deferred_cells(cache);
			process_invalidation_requests(cache);
		}

		process_migrations(cache, &cache->quiesced_migrations, issue_copy_or_discard);
		process_migrations(cache, &cache->completed_migrations, complete_migration);

		if (commit_if_needed(cache)) {
			process_deferred_flush_bios(cache, false);
			process_migrations(cache, &cache->need_commit_migrations, migration_failure);
		} else {
			process_deferred_flush_bios(cache, true);
			process_migrations(cache, &cache->need_commit_migrations,
					   migration_success_post_commit);
		}

		ack_quiescing(cache);

	} while (more_work(cache));
}

/*
 * We want to commit periodically so that not too much
 * unwritten metadata builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);

	policy_tick(cache->policy, true);
	wake_worker(cache);
	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}

/*----------------------------------------------------------------*/

static int is_congested(struct dm_dev *dev, int bdi_bits)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);
	return bdi_congested(&q->backing_dev_info, bdi_bits);
}

static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	struct cache *cache = container_of(cb, struct cache, callbacks);

	return is_congested(cache->origin_dev, bdi_bits) ||
		is_congested(cache->cache_dev, bdi_bits);
}

/*----------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------*/

/*
 * This function gets called on the error paths of the constructor, so we
 * have to cope with a partially initialised struct.
 */
static void destroy(struct cache *cache)
{
	unsigned i;

	mempool_destroy(cache->migration_pool);

	if (cache->all_io_ds)
		dm_deferred_set_destroy(cache->all_io_ds);

	if (cache->prison)
		dm_bio_prison_destroy(cache->prison);

	if (cache->wq)
		destroy_workqueue(cache->wq);

	if (cache->dirty_bitset)
		free_bitset(cache->dirty_bitset);

	if (cache->discard_bitset)
		free_bitset(cache->discard_bitset);

	if (cache->copier)
		dm_kcopyd_client_destroy(cache->copier);

	if (cache->cmd)
		dm_cache_metadata_close(cache->cmd);

	if (cache->metadata_dev)
		dm_put_device(cache->ti, cache->metadata_dev);

	if (cache->origin_dev)
		dm_put_device(cache->ti, cache->origin_dev);

	if (cache->cache_dev)
		dm_put_device(cache->ti, cache->cache_dev);

	if (cache->policy)
		dm_cache_policy_destroy(cache->policy);

	for (i = 0; i < cache->nr_ctr_args; i++)
		kfree(cache->ctr_args[i]);
	kfree(cache->ctr_args);

	kfree(cache);
}

static void cache_dtr(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	destroy(cache);
}

static sector_t get_dev_size(struct dm_dev *dev)
{
	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

/*----------------------------------------------------------------*/

/*
 * Construct a cache device mapping.
 *
 * cache <metadata dev> <cache dev> <origin dev> <block size>
 *       <#feature args> [<feature arg>]*
 *       <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev   : fast device holding the persistent metadata
 * cache dev      : fast device holding cached data blocks
 * origin dev     : slow device holding original data blocks
 * block size     : cache unit size in sectors
 *
 * #feature args  : number of feature arguments passed
 * feature args   : writethrough.  (The default is writeback.)
 *
 * policy         : the replacement policy to use
 * #policy args   : an even number of policy arguments corresponding
 *                  to key/value pairs passed to the policy
 * policy args    : key/value pairs passed to the policy
 *                  E.g. 'sequential_threshold 1024'
 *                  See cache-policies.txt for details.
 *
 * Optional feature arguments are:
 *   writethrough : write through caching that prohibits cache block
 *                  content from being different from origin block content.
 *                  Without this argument, the default behaviour is to write
 *                  back cache block contents later for performance reasons,
 *                  so they may differ from the corresponding origin blocks.
 */
struct cache_args {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;

	struct dm_dev *cache_dev;
	sector_t cache_sectors;

	struct dm_dev *origin_dev;
	sector_t origin_sectors;

	uint32_t block_size;

	const char *policy_name;
	int policy_argc;
	const char **policy_argv;

	struct cache_features features;
};

static void destroy_cache_args(struct cache_args *ca)
{
	if (ca->metadata_dev)
		dm_put_device(ca->ti, ca->metadata_dev);

	if (ca->cache_dev)
		dm_put_device(ca->ti, ca->cache_dev);

	if (ca->origin_dev)
		dm_put_device(ca->ti, ca->origin_dev);

	kfree(ca);
}

static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{
	if (!as->argc) {
		*error = "Insufficient args";
		return false;
	}

	return true;
}

static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
			      char **error)
{
	int r;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(ca->metadata_dev);
	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(ca->metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);

	return 0;
}

static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
			   char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->cache_dev);
	if (r) {
		*error = "Error opening cache device";
		return r;
	}
	ca->cache_sectors = get_dev_size(ca->cache_dev);

	return 0;
}

static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->origin_dev);
	if (r) {
		*error = "Error opening origin device";
		return r;
	}

	ca->origin_sectors = get_dev_size(ca->origin_dev);
	if (ca->ti->len > ca->origin_sectors) {
		*error = "Device size larger than cached device";
		return -EINVAL;
	}

	return 0;
}

static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	unsigned long block_size;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
		*error = "Invalid data block size";
		return -EINVAL;
	}

	if (block_size > ca->cache_sectors) {
		*error = "Data block size is larger than the cache device";
		return -EINVAL;
	}

	ca->block_size = block_size;

	return 0;
}

static void init_features(struct cache_features *cf)
{
	cf->mode = CM_WRITE;
	cf->io_mode = CM_IO_WRITEBACK;
}

static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
			  char **error)
{
	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of cache feature arguments"},
	};

	int r;
	unsigned argc;
	const char *arg;
	struct cache_features *cf = &ca->features;

	init_features(cf);

	r = dm_read_arg_group(_args, as, &argc, error);
	if (r)
		return -EINVAL;

	while (argc--) {
		arg = dm_shift_arg(as);

		if (!strcasecmp(arg, "writeback"))
			cf->io_mode = CM_IO_WRITEBACK;

		else if (!strcasecmp(arg, "writethrough"))
			cf->io_mode = CM_IO_WRITETHROUGH;

		else if (!strcasecmp(arg, "passthrough"))
			cf->io_mode = CM_IO_PASSTHROUGH;

		else {
			*error = "Unrecognised cache feature requested";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
			char **error)
{
	static struct dm_arg _args[] = {
		{0, 1024, "Invalid number of policy arguments"},
	};

	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	ca->policy_name = dm_shift_arg(as);

	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
	if (r)
		return -EINVAL;

	ca->policy_argv = (const char **)as->argv;
	dm_consume_args(as, ca->policy_argc);

	return 0;
}

static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
			    char **error)
{
	int r;
	struct dm_arg_set as;

	as.argc = argc;
	as.argv = argv;

	r = parse_metadata_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_cache_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_origin_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_block_size(ca, &as, error);
	if (r)
		return r;

	r = parse_features(ca, &as, error);
	if (r)
		return r;

	r = parse_policy(ca, &as, error);
	if (r)
		return r;

	return 0;
}

/*----------------------------------------------------------------*/

static struct kmem_cache *migration_cache;

#define NOT_CORE_OPTION 1
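
/*
 * Runtime configuration: keys the core target understands (currently just
 * migration_threshold) are handled here; anything else is passed on to
 * the policy via policy_set_config_value().
 */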
static int process_config_option(struct cache *cache, const char *key, const char *value)
{
	unsigned long tmp;

	if (!strcasecmp(key, "migration_threshold")) {
		if (kstrtoul(value, 10, &tmp))
			return -EINVAL;

		cache->migration_threshold = tmp;
		return 0;
	}

	return NOT_CORE_OPTION;
}

static int set_config_value(struct cache *cache, const char *key, const char *value)
{
	int r = process_config_option(cache, key, value);

	if (r == NOT_CORE_OPTION)
		r = policy_set_config_value(cache->policy, key, value);

	if (r)
		DMWARN("bad config value for %s: %s", key, value);

	return r;
}

static int set_config_values(struct cache *cache, int argc, const char **argv)
{
	int r = 0;

	if (argc & 1) {
		DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
		return -EINVAL;
	}

	while (argc) {
		r = set_config_value(cache, argv[0], argv[1]);
		if (r)
			break;

		argc -= 2;
		argv += 2;
	}

	return r;
}

static int create_cache_policy(struct cache *cache, struct cache_args *ca,
			       char **error)
{
	struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
							   cache->cache_size,
							   cache->origin_sectors,
							   cache->sectors_per_block);
	if (IS_ERR(p)) {
		*error = "Error creating cache's policy";
		return PTR_ERR(p);
	}
	cache->policy = p;

	return 0;
}

/*
 * We want the discard block size to be at least the size of the cache
 * block size and have no more than 2^14 discard blocks across the origin.
 */
#define MAX_DISCARD_BLOCKS (1 << 14)

static bool too_many_discard_blocks(sector_t discard_block_size,
				    sector_t origin_size)
{
	(void) sector_div(origin_size, discard_block_size);

	return origin_size > MAX_DISCARD_BLOCKS;
}

static sector_t calculate_discard_block_size(sector_t cache_block_size,
					     sector_t origin_size)
{
	sector_t discard_block_size = cache_block_size;

	if (origin_size)
		while (too_many_discard_blocks(discard_block_size, origin_size))
			discard_block_size *= 2;

	return discard_block_size;
}

static void set_cache_size(struct cache *cache, dm_cblock_t size)
{
	dm_block_t nr_blocks = from_cblock(size);

	if (nr_blocks > (1 << 20) && cache->cache_size != size)
		DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
			     "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
			     "Please consider increasing the cache block size to reduce the overall cache block count.",
			     (unsigned long long) nr_blocks);

	cache->cache_size = size;
}

#define DEFAULT_MIGRATION_THRESHOLD 2048
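
/*
 * Build the struct cache from the parsed arguments: take ownership of the
 * devices, size the origin and cache in blocks, create the policy and
 * metadata objects, and allocate the bitsets, workqueue, bio prison and
 * migration mempool.  On any failure destroy() cleans up whatever was
 * already set up.
 */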
static int cache_create(struct cache_args *ca, struct cache **result)
{
	int r = 0;
	char **error = &ca->ti->error;
	struct cache *cache;
	struct dm_target *ti = ca->ti;
	dm_block_t origin_blocks;
	struct dm_cache_metadata *cmd;
	bool may_format = ca->features.mode == CM_WRITE;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return -ENOMEM;

	cache->ti = ca->ti;
	ti->private = cache;
	ti->num_flush_bios = 2;
	ti->flush_supported = true;

	ti->num_discard_bios = 1;
	ti->discards_supported = true;
	ti->discard_zeroes_data_unsupported = true;
	ti->split_discard_bios = false;

	cache->features = ca->features;
	ti->per_bio_data_size = get_per_bio_data_size(cache);

	cache->callbacks.congested_fn = cache_is_congested;
	dm_table_add_target_callbacks(ti->table, &cache->callbacks);

	cache->metadata_dev = ca->metadata_dev;
	cache->origin_dev = ca->origin_dev;
	cache->cache_dev = ca->cache_dev;

	ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;

	/* FIXME: factor out this whole section */
	origin_blocks = cache->origin_sectors = ca->origin_sectors;
	origin_blocks = block_div(origin_blocks, ca->block_size);
	cache->origin_blocks = to_oblock(origin_blocks);

	cache->sectors_per_block = ca->block_size;
	if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
		r = -EINVAL;
		goto bad;
	}

	if (ca->block_size & (ca->block_size - 1)) {
		dm_block_t cache_size = ca->cache_sectors;

		cache->sectors_per_block_shift = -1;
		cache_size = block_div(cache_size, ca->block_size);
		set_cache_size(cache, to_cblock(cache_size));
	} else {
		cache->sectors_per_block_shift = __ffs(ca->block_size);
		set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
	}

	r = create_cache_policy(cache, ca, error);
	if (r)
		goto bad;

	cache->policy_nr_args = ca->policy_argc;
	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;

	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
	if (r) {
		*error = "Error setting cache policy's config values";
		goto bad;
	}

	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
				     ca->block_size, may_format,
				     dm_cache_policy_get_hint_size(cache->policy));
	if (IS_ERR(cmd)) {
		*error = "Error creating metadata object";
		r = PTR_ERR(cmd);
		goto bad;
	}
	cache->cmd = cmd;
	set_cache_mode(cache, CM_WRITE);
	if (get_cache_mode(cache) != CM_WRITE) {
		*error = "Unable to get write access to metadata, please check/repair metadata.";
		r = -EINVAL;
		goto bad;
	}

	if (passthrough_mode(&cache->features)) {
		bool all_clean;

		r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
		if (r) {
			*error = "dm_cache_metadata_all_clean() failed";
			goto bad;
		}

		if (!all_clean) {
			*error = "Cannot enter passthrough mode unless all blocks are clean";
			r = -EINVAL;
			goto bad;
		}
	}

	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->deferred_cells);
	bio_list_init(&cache->deferred_bios);
	bio_list_init(&cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	INIT_LIST_HEAD(&cache->quiesced_migrations);
	INIT_LIST_HEAD(&cache->completed_migrations);
	INIT_LIST_HEAD(&cache->need_commit_migrations);
	atomic_set(&cache->nr_allocated_migrations, 0);
	atomic_set(&cache->nr_io_migrations, 0);
	init_waitqueue_head(&cache->migration_wait);

	init_waitqueue_head(&cache->quiescing_wait);
	atomic_set(&cache->quiescing, 0);
	atomic_set(&cache->quiescing_ack, 0);

	r = -ENOMEM;
	atomic_set(&cache->nr_dirty, 0);
	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
	if (!cache->dirty_bitset) {
		*error = "could not allocate dirty bitset";
		goto bad;
	}
	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));

	cache->discard_block_size =
		calculate_discard_block_size(cache->sectors_per_block,
					     cache->origin_sectors);
	cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
							      cache->discard_block_size));
	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
	if (!cache->discard_bitset) {
		*error = "could not allocate discard bitset";
		goto bad;
	}
	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(cache->copier)) {
		*error = "could not create kcopyd client";
		r = PTR_ERR(cache->copier);
		goto bad;
	}

	cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!cache->wq) {
		*error = "could not create workqueue for metadata object";
		goto bad;
	}
	INIT_WORK(&cache->worker, do_worker);
	INIT_DELAYED_WORK(&cache->waker, do_waker);
	cache->last_commit_jiffies = jiffies;

	cache->prison = dm_bio_prison_create();
	if (!cache->prison) {
		*error = "could not create bio prison";
		goto bad;
	}

	cache->all_io_ds = dm_deferred_set_create();
	if (!cache->all_io_ds) {
		*error = "could not create all_io deferred set";
		goto bad;
	}

	cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
							 migration_cache);
	if (!cache->migration_pool) {
		*error = "Error creating cache's migration mempool";
		goto bad;
	}

	cache->need_tick_bio = true;
	cache->sized = false;
	cache->invalidate = false;
	cache->commit_requested = false;
	cache->loaded_mappings = false;
	cache->loaded_discards = false;

	load_stats(cache);

	atomic_set(&cache->stats.demotion, 0);
	atomic_set(&cache->stats.promotion, 0);
	atomic_set(&cache->stats.copies_avoided, 0);
	atomic_set(&cache->stats.cache_cell_clash, 0);
	atomic_set(&cache->stats.commit_count, 0);
	atomic_set(&cache->stats.discard_count, 0);

	spin_lock_init(&cache->invalidation_lock);
	INIT_LIST_HEAD(&cache->invalidation_requests);

	iot_init(&cache->origin_tracker);

	*result = cache;
	return 0;

bad:
	destroy(cache);
	return r;
}

static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
	unsigned i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);
		if (!copy[i]) {
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			return -ENOMEM;
		}
	}

	cache->nr_ctr_args = argc;
	cache->ctr_args = copy;

	return 0;
}

static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct cache_args *ca;
	struct cache *cache = NULL;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca) {
		ti->error = "Error allocating memory for cache";
		return -ENOMEM;
	}
	ca->ti = ti;

	r = parse_cache_args(ca, argc, argv, &ti->error);
	if (r)
		goto out;

	r = cache_create(ca, &cache);
	if (r)
		goto out;

	r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
	if (r) {
		destroy(cache);
		goto out;
	}

	ti->private = cache;

out:
	destroy_cache_args(ca);
	return r;
}

/*----------------------------------------------------------------*/
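
/*
 * The map function for the fast path.  Bios that would need a migration,
 * or that hit a block already locked in the bio prison, are deferred to
 * the worker thread; everything else is remapped to the origin or the
 * cache device straight away.
 */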
static int cache_map(struct dm_target *ti, struct bio *bio)
{
	struct cache *cache = ti->private;

	int r;
	struct dm_bio_prison_cell *cell = NULL;
	dm_oblock_t block = get_bio_block(cache, bio);
	size_t pb_data_size = get_per_bio_data_size(cache);
	bool can_migrate = false;
	bool fast_promotion;
	struct policy_result lookup_result;
	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
	struct old_oblock_lock ool;

	ool.locker.fn = null_locker;

	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
		/*
		 * This can only occur if the io goes to a partial block at
		 * the end of the origin device.  We don't cache these.
		 * Just remap to the origin and carry on.
		 */
		remap_to_origin(cache, bio);
		accounted_begin(cache, bio);
		return DM_MAPIO_REMAPPED;
	}

	if (discard_or_flush(bio)) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell = alloc_prison_cell(cache);
	if (!cell) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = bio_detain(cache, block, bio, cell,
		       (cell_free_fn) free_prison_cell,
		       cache, &cell);
	if (r) {
		if (r < 0)
			defer_bio(cache, bio);

		return DM_MAPIO_SUBMITTED;
	}

	fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);

	r = policy_map(cache->policy, block, false, can_migrate, fast_promotion,
		       bio, &ool.locker, &lookup_result);
	if (r == -EWOULDBLOCK) {
		cell_defer(cache, cell, true);
		return DM_MAPIO_SUBMITTED;

	} else if (r) {
		DMERR_LIMIT("%s: Unexpected return from cache replacement policy: %d",
			    cache_device_name(cache), r);
		cell_defer(cache, cell, false);
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = DM_MAPIO_REMAPPED;
	switch (lookup_result.op) {
	case POLICY_HIT:
		if (passthrough_mode(&cache->features)) {
			if (bio_data_dir(bio) == WRITE) {
				/*
				 * We need to invalidate this block, so
				 * defer for the worker thread.
				 */
				cell_defer(cache, cell, true);
				r = DM_MAPIO_SUBMITTED;

			} else {
				inc_miss_counter(cache, bio);
				remap_to_origin_clear_discard(cache, bio, block);
				accounted_begin(cache, bio);
				inc_ds(cache, bio, cell);
				// FIXME: we want to remap hits or misses straight
				// away rather than passing over to the worker.
				cell_defer(cache, cell, false);
			}

		} else {
			inc_hit_counter(cache, bio);
			if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
			    !is_dirty(cache, lookup_result.cblock)) {
				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
				accounted_begin(cache, bio);
				inc_ds(cache, bio, cell);
				cell_defer(cache, cell, false);

			} else
				remap_cell_to_cache_dirty(cache, cell, block, lookup_result.cblock, false);
		}
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		if (pb->req_nr != 0) {
			/*
			 * This is a duplicate writethrough io that is no
			 * longer needed because the block has been demoted.
			 */
			bio_endio(bio);
			// FIXME: remap everything as a miss
			cell_defer(cache, cell, false);
			r = DM_MAPIO_SUBMITTED;

		} else
			remap_cell_to_origin_clear_discard(cache, cell, block, false);
		break;

	default:
		DMERR_LIMIT("%s: %s: erroring bio: unknown policy op: %u",
			    cache_device_name(cache), __func__,
			    (unsigned) lookup_result.op);
		cell_defer(cache, cell, false);
		bio_io_error(bio);
		r = DM_MAPIO_SUBMITTED;
	}

	return r;
}

static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct cache *cache = ti->private;
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	if (pb->tick) {
		policy_tick(cache->policy, false);

		spin_lock_irqsave(&cache->lock, flags);
		cache->need_tick_bio = true;
		spin_unlock_irqrestore(&cache->lock, flags);
	}

	check_for_quiesced_migrations(cache, pb);
	accounted_complete(cache, bio);

	return 0;
}
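
/*
 * The helpers below flush the in-core state (dirty bitset, discard
 * bitset, policy hints) out to the metadata device; sync_metadata()
 * calls them during suspend before the final commit.
 */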
static int write_dirty_bitset(struct cache *cache)
{
	unsigned i, r;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return -EINVAL;

	for (i = 0; i < from_cblock(cache->cache_size); i++) {
		r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
				       is_dirty(cache, to_cblock(i)));
		if (r) {
			metadata_operation_failed(cache, "dm_cache_set_dirty", r);
			return r;
		}
	}

	return 0;
}

static int write_discard_bitset(struct cache *cache)
{
	unsigned i, r;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return -EINVAL;

	r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
					   cache->discard_nr_blocks);
	if (r) {
		DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache));
		metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r);
		return r;
	}

	for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
		r = dm_cache_set_discard(cache->cmd, to_dblock(i),
					 is_discarded(cache, to_dblock(i)));
		if (r) {
			metadata_operation_failed(cache, "dm_cache_set_discard", r);
			return r;
		}
	}

	return 0;
}

static int write_hints(struct cache *cache)
{
	int r;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return -EINVAL;

	r = dm_cache_write_hints(cache->cmd, cache->policy);
	if (r) {
		metadata_operation_failed(cache, "dm_cache_write_hints", r);
		return r;
	}

	return 0;
}

/*
 * returns true on success
 */
static bool sync_metadata(struct cache *cache)
{
	int r1, r2, r3, r4;

	r1 = write_dirty_bitset(cache);
	if (r1)
		DMERR("%s: could not write dirty bitset", cache_device_name(cache));

	r2 = write_discard_bitset(cache);
	if (r2)
		DMERR("%s: could not write discard bitset", cache_device_name(cache));

	save_stats(cache);

	r3 = write_hints(cache);
	if (r3)
		DMERR("%s: could not write hints", cache_device_name(cache));

	/*
	 * If writing the above metadata failed, we still commit, but don't
	 * set the clean shutdown flag.  This will effectively force every
	 * dirty bit to be set on reload.
	 */
	r4 = commit(cache, !r1 && !r2 && !r3);
	if (r4)
		DMERR("%s: could not write cache metadata", cache_device_name(cache));

	return !r1 && !r2 && !r3 && !r4;
}

static void cache_postsuspend(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	start_quiescing(cache);
	wait_for_migrations(cache);
	stop_worker(cache);
	requeue_deferred_bios(cache);
	requeue_deferred_cells(cache);
	stop_quiescing(cache);

	if (get_cache_mode(cache) == CM_WRITE)
		(void) sync_metadata(cache);
}

static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
			bool dirty, uint32_t hint, bool hint_valid)
{
	int r;
	struct cache *cache = context;

	r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
	if (r)
		return r;

	if (dirty)
		set_dirty(cache, oblock, cblock);
	else
		clear_dirty(cache, oblock, cblock);

	return 0;
}

/*
 * The discard block size in the on disk metadata is not
 * necessarily the same as we're currently using.  So we have to
 * be careful to only set the discarded attribute if we know it
 * covers a complete block of the new size.
 */
  2668. struct discard_load_info {
  2669. struct cache *cache;
  2670. /*
  2671. * These blocks are sized using the on disk dblock size, rather
  2672. * than the current one.
  2673. */
  2674. dm_block_t block_size;
  2675. dm_block_t discard_begin, discard_end;
  2676. };
static void discard_load_info_init(struct cache *cache,
				   struct discard_load_info *li)
{
	li->cache = cache;
	li->discard_begin = li->discard_end = 0;
}

static void set_discard_range(struct discard_load_info *li)
{
	sector_t b, e;

	if (li->discard_begin == li->discard_end)
		return;

	/*
	 * Convert to sectors.
	 */
	b = li->discard_begin * li->block_size;
	e = li->discard_end * li->block_size;

	/*
	 * Then convert back to the current dblock size.
	 */
	b = dm_sector_div_up(b, li->cache->discard_block_size);
	sector_div(e, li->cache->discard_block_size);

	/*
	 * The origin may have shrunk, so we need to check we're still in
	 * bounds.
	 */
	if (e > from_dblock(li->cache->discard_nr_blocks))
		e = from_dblock(li->cache->discard_nr_blocks);

	for (; b < e; b++)
		set_discard(li->cache, to_dblock(b));
}

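/*
 * Callback for dm_cache_load_discards().  Coalesces runs of discarded
 * blocks (sized in the on disk dblock size) and emits each completed
 * run via set_discard_range().
 */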
static int load_discard(void *context, sector_t discard_block_size,
			dm_dblock_t dblock, bool discard)
{
	struct discard_load_info *li = context;

	li->block_size = discard_block_size;

	if (discard) {
		if (from_dblock(dblock) == li->discard_end)
			/*
			 * We're already in a discard range, just extend it.
			 */
			li->discard_end = li->discard_end + 1ULL;
		else {
			/*
			 * Emit the old range and start a new one.
			 */
			set_discard_range(li);
			li->discard_begin = from_dblock(dblock);
			li->discard_end = li->discard_begin + 1ULL;
		}
	} else {
		set_discard_range(li);
		li->discard_begin = li->discard_end = 0;
	}

	return 0;
}

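/*
 * The size of the cache device, rounded down to a whole number of
 * cache blocks.
 */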
static dm_cblock_t get_cache_dev_size(struct cache *cache)
{
	sector_t size = get_dev_size(cache->cache_dev);
	(void) sector_div(size, cache->sectors_per_block);
	return to_cblock(size);
}

static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
	if (from_cblock(new_size) > from_cblock(cache->cache_size))
		return true;

	/*
	 * We can't drop a dirty block when shrinking the cache.
	 */
	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
		new_size = to_cblock(from_cblock(new_size) + 1);
		if (is_dirty(cache, new_size)) {
			DMERR("%s: unable to shrink cache; cache block %llu is dirty",
			      cache_device_name(cache),
			      (unsigned long long) from_cblock(new_size));
			return false;
		}
	}

	return true;
}

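/*
 * Resize the on-disk mapping array; only update the in-core cache
 * size if that succeeds.
 */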
static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{
	int r;

	r = dm_cache_resize(cache->cmd, new_size);
	if (r) {
		DMERR("%s: could not resize cache metadata", cache_device_name(cache));
		metadata_operation_failed(cache, "dm_cache_resize", r);
		return r;
	}

	set_cache_size(cache, new_size);

	return 0;
}

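/*
 * Bring the metadata into line with the current cache device size,
 * then load the mappings and discards the first time we resume.
 */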
static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	dm_cblock_t csize = get_cache_dev_size(cache);

	/*
	 * Check to see if the cache has resized.
	 */
	if (!cache->sized) {
		r = resize_cache_dev(cache, csize);
		if (r)
			return r;

		cache->sized = true;

	} else if (csize != cache->cache_size) {
		if (!can_resize(cache, csize))
			return -EINVAL;

		r = resize_cache_dev(cache, csize);
		if (r)
			return r;
	}

	if (!cache->loaded_mappings) {
		r = dm_cache_load_mappings(cache->cmd, cache->policy,
					   load_mapping, cache);
		if (r) {
			DMERR("%s: could not load cache mappings", cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_load_mappings", r);
			return r;
		}

		cache->loaded_mappings = true;
	}

	if (!cache->loaded_discards) {
		struct discard_load_info li;

		/*
		 * The discard bitset could have been resized, or the
		 * discard block size changed.  To be safe we start by
		 * setting every dblock to not discarded.
		 */
		clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

		discard_load_info_init(cache, &li);
		r = dm_cache_load_discards(cache->cmd, load_discard, &li);
		if (r) {
			DMERR("%s: could not load origin discards", cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_load_discards", r);
			return r;
		}
		set_discard_range(&li);

		cache->loaded_discards = true;
	}

	return r;
}

static void cache_resume(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	cache->need_tick_bio = true;
	do_waker(&cache->waker.work);
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <cache block size> <#used cache blocks>/<#total cache blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
 */
static void cache_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	int r = 0;
	unsigned i;
	ssize_t sz = 0;
	dm_block_t nr_free_blocks_metadata = 0;
	dm_block_t nr_blocks_metadata = 0;
	char buf[BDEVNAME_SIZE];
	struct cache *cache = ti->private;
	dm_cblock_t residency;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_cache_mode(cache) == CM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit(cache, false);

		r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
		if (r) {
			DMERR("%s: dm_cache_get_free_metadata_block_count returned %d",
			      cache_device_name(cache), r);
			goto err;
		}

		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
		if (r) {
			DMERR("%s: dm_cache_get_metadata_dev_size returned %d",
			      cache_device_name(cache), r);
			goto err;
		}

		residency = policy_residency(cache->policy);

		DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
		       (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       cache->sectors_per_block,
		       (unsigned long long) from_cblock(residency),
		       (unsigned long long) from_cblock(cache->cache_size),
		       (unsigned) atomic_read(&cache->stats.read_hit),
		       (unsigned) atomic_read(&cache->stats.read_miss),
		       (unsigned) atomic_read(&cache->stats.write_hit),
		       (unsigned) atomic_read(&cache->stats.write_miss),
		       (unsigned) atomic_read(&cache->stats.demotion),
		       (unsigned) atomic_read(&cache->stats.promotion),
		       (unsigned long) atomic_read(&cache->nr_dirty));

		if (writethrough_mode(&cache->features))
			DMEMIT("1 writethrough ");
		else if (passthrough_mode(&cache->features))
			DMEMIT("1 passthrough ");
		else if (writeback_mode(&cache->features))
			DMEMIT("1 writeback ");
		else {
			DMERR("%s: internal error: unknown io mode: %d",
			      cache_device_name(cache), (int) cache->features.io_mode);
			goto err;
		}

		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);

		DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
		if (sz < maxlen) {
			r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
			if (r)
				DMERR("%s: policy_emit_config_values returned %d",
				      cache_device_name(cache), r);
		}

		if (get_cache_mode(cache) == CM_READ_ONLY)
			DMEMIT("ro ");
		else
			DMEMIT("rw ");

		if (dm_cache_metadata_needs_check(cache->cmd))
			DMEMIT("needs_check ");
		else
			DMEMIT("- ");

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < cache->nr_ctr_args - 1; i++)
			DMEMIT(" %s", cache->ctr_args[i]);
		if (cache->nr_ctr_args)
			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
	}

	return;

err:
	DMEMIT("Error");
}

/*
 * A cache block range can take two forms:
 *
 * i) A single cblock, eg. '3456'
 * ii) A begin and end cblock with a dash between, eg. 123-234
 */
static int parse_cblock_range(struct cache *cache, const char *str,
			      struct cblock_range *result)
{
	char dummy;
	uint64_t b, e;
	int r;

	/*
	 * Try to parse form (ii) first.
	 */
	r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
	if (r < 0)
		return r;

	if (r == 2) {
		result->begin = to_cblock(b);
		result->end = to_cblock(e);
		return 0;
	}

	/*
	 * That didn't work, try form (i).
	 */
	r = sscanf(str, "%llu%c", &b, &dummy);
	if (r < 0)
		return r;

	if (r == 1) {
		result->begin = to_cblock(b);
		result->end = to_cblock(from_cblock(result->begin) + 1u);
		return 0;
	}

	DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
	return -EINVAL;
}

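/*
 * Ranges are half open: [begin, end).  Both ends must lie within the
 * current cache size and the range must not be empty.
 */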
static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
{
	uint64_t b = from_cblock(range->begin);
	uint64_t e = from_cblock(range->end);
	uint64_t n = from_cblock(cache->cache_size);

	if (b >= n) {
		DMERR("%s: begin cblock out of range: %llu >= %llu",
		      cache_device_name(cache), b, n);
		return -EINVAL;
	}

	if (e > n) {
		DMERR("%s: end cblock out of range: %llu > %llu",
		      cache_device_name(cache), e, n);
		return -EINVAL;
	}

	if (b >= e) {
		DMERR("%s: invalid cblock range: %llu >= %llu",
		      cache_device_name(cache), b, e);
		return -EINVAL;
	}

	return 0;
}

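/*
 * Queue the request for the worker thread and block until it has
 * been processed.
 */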
static int request_invalidation(struct cache *cache, struct cblock_range *range)
{
	struct invalidation_request req;

	INIT_LIST_HEAD(&req.list);
	req.cblocks = range;
	atomic_set(&req.complete, 0);
	req.err = 0;
	init_waitqueue_head(&req.result_wait);

	spin_lock(&cache->invalidation_lock);
	list_add(&req.list, &cache->invalidation_requests);
	spin_unlock(&cache->invalidation_lock);
	wake_worker(cache);

	wait_event(req.result_wait, atomic_read(&req.complete));
	return req.err;
}

static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
					      const char **cblock_ranges)
{
	int r = 0;
	unsigned i;
	struct cblock_range range;

	if (!passthrough_mode(&cache->features)) {
		DMERR("%s: cache has to be in passthrough mode for invalidation",
		      cache_device_name(cache));
		return -EPERM;
	}

	for (i = 0; i < count; i++) {
		r = parse_cblock_range(cache, cblock_ranges[i], &range);
		if (r)
			break;

		r = validate_cblock_range(cache, &range);
		if (r)
			break;

		/*
		 * Pass begin and end origin blocks to the worker and wake it.
		 */
		r = request_invalidation(cache, &range);
		if (r)
			break;
	}

	return r;
}

/*
 * Supports
 *	"<key> <value>"
 * and
 *	"invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
 *
 * The key migration_threshold is supported by the cache target core.
 */
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct cache *cache = ti->private;

	if (!argc)
		return -EINVAL;

	if (get_cache_mode(cache) >= CM_READ_ONLY) {
		DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
		      cache_device_name(cache));
		return -EOPNOTSUPP;
	}

	if (!strcasecmp(argv[0], "invalidate_cblocks"))
		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);

	if (argc != 2)
		return -EINVAL;

	return set_config_value(cache, argv[0], argv[1]);
}

static int cache_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int r = 0;
	struct cache *cache = ti->private;

	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
	if (!r)
		r = fn(ti, cache->origin_dev, 0, ti->len, data);

	return r;
}

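/*
 * A single discard is limited to 1024 discard blocks, and never more
 * than the size of the origin.
 */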
static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
	/*
	 * FIXME: these limits may be incompatible with the cache device
	 */
	limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
					    cache->origin_sectors);
	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
}

static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct cache *cache = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with the
	 * cache's blocksize (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < cache->sectors_per_block ||
	    do_div(io_opt_sectors, cache->sectors_per_block)) {
		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
	}
	set_discard_limits(cache, limits);
}

/*----------------------------------------------------------------*/

static struct target_type cache_target = {
	.name = "cache",
	.version = {1, 8, 0},
	.module = THIS_MODULE,
	.ctr = cache_ctr,
	.dtr = cache_dtr,
	.map = cache_map,
	.end_io = cache_end_io,
	.postsuspend = cache_postsuspend,
	.preresume = cache_preresume,
	.resume = cache_resume,
	.status = cache_status,
	.message = cache_message,
	.iterate_devices = cache_iterate_devices,
	.io_hints = cache_io_hints,
};

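/*
 * Register the target and create the slab cache used for migration
 * structures.
 */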
static int __init dm_cache_init(void)
{
	int r;

	r = dm_register_target(&cache_target);
	if (r) {
		DMERR("cache target registration failed: %d", r);
		return r;
	}

	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
	if (!migration_cache) {
		dm_unregister_target(&cache_target);
		return -ENOMEM;
	}

	return 0;
}

static void __exit dm_cache_exit(void)
{
	dm_unregister_target(&cache_target);
	kmem_cache_destroy(migration_cache);
}

module_init(dm_cache_init);
module_exit(dm_cache_exit);

MODULE_DESCRIPTION(DM_NAME " cache target");
MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
MODULE_LICENSE("GPL");