/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache"

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
	"A percentage of time allocated for copying to and/or from cache");

/*----------------------------------------------------------------*/
/*
 * Glossary:
 *
 * oblock: index of an origin block
 * cblock: index of a cache block
 * promotion: movement of a block from origin to cache
 * demotion: movement of a block from cache to origin
 * migration: movement of a block between the origin and cache device,
 *	      either direction
 */

/*----------------------------------------------------------------*/
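/*
 * Bitset helpers. The dirty and discard state below is kept in plain
 * bitsets; sizes are rounded up to whole unsigned longs so the standard
 * bitops can be used on them.
 */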
static size_t bitset_size_in_bytes(unsigned nr_entries)
{
	return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
}

static unsigned long *alloc_bitset(unsigned nr_entries)
{
	size_t s = bitset_size_in_bytes(nr_entries);
	return vzalloc(s);
}

static void clear_bitset(void *bitset, unsigned nr_entries)
{
	size_t s = bitset_size_in_bytes(nr_entries);
	memset(bitset, 0, s);
}

static void free_bitset(unsigned long *bits)
{
	vfree(bits);
}
/*----------------------------------------------------------------*/

/*
 * There are a couple of places where we let a bio run, but want to do some
 * work before calling its endio function. We do this by temporarily
 * changing the endio fn.
 */
struct dm_hook_info {
	bio_end_io_t *bi_end_io;
	void *bi_private;
};

static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
			bio_end_io_t *bi_end_io, void *bi_private)
{
	h->bi_end_io = bio->bi_end_io;
	h->bi_private = bio->bi_private;

	bio->bi_end_io = bi_end_io;
	bio->bi_private = bi_private;
}

static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
	bio->bi_end_io = h->bi_end_io;
	bio->bi_private = h->bi_private;

	/*
	 * Must bump bi_remaining to allow bio to complete with
	 * restored bi_end_io.
	 */
	atomic_inc(&bio->bi_remaining);
}
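/*
 * Illustrative use of the hook pair (this is the pattern followed by the
 * writethrough and overwrite paths below):
 *
 *	dm_hook_bio(&pb->hook_info, bio, our_endio, our_context);
 *	generic_make_request(bio);
 *
 *	// later, in our_endio():
 *	dm_unhook_bio(&pb->hook_info, bio);
 *	// ... do extra work, then complete or reissue the bio ...
 */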
/*----------------------------------------------------------------*/

#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
#define MIGRATION_COUNT_WINDOW 10

/*
 * The block size of the device holding cache data must be
 * between 32KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * FIXME: the cache is read/write for the time being.
 */
enum cache_metadata_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
};
enum cache_io_mode {
	/*
	 * Data is written to cached blocks only. These blocks are marked
	 * dirty. If you lose the cache device you will lose data.
	 * Potential performance increase for both reads and writes.
	 */
	CM_IO_WRITEBACK,

	/*
	 * Data is written to both cache and origin. Blocks are never
	 * dirty. Potential performance benefit for reads only.
	 */
	CM_IO_WRITETHROUGH,

	/*
	 * A degraded mode useful for various cache coherency situations
	 * (eg, rolling back snapshots). Reads and writes always go to the
	 * origin. If a write goes to a cached oblock, then the cache
	 * block is invalidated.
	 */
	CM_IO_PASSTHROUGH
};

struct cache_features {
	enum cache_metadata_mode mode;
	enum cache_io_mode io_mode;
};
struct cache_stats {
	atomic_t read_hit;
	atomic_t read_miss;
	atomic_t write_hit;
	atomic_t write_miss;
	atomic_t demotion;
	atomic_t promotion;
	atomic_t copies_avoided;
	atomic_t cache_cell_clash;
	atomic_t commit_count;
	atomic_t discard_count;
};

/*
 * Defines a half-open range of cblocks: begin to (end - 1) are in the
 * range; end is the one-past-the-end value.
 */
struct cblock_range {
	dm_cblock_t begin;
	dm_cblock_t end;
};

struct invalidation_request {
	struct list_head list;
	struct cblock_range *cblocks;

	atomic_t complete;
	int err;

	wait_queue_head_t result_wait;
};
struct cache {
	struct dm_target *ti;
	struct dm_target_callbacks callbacks;

	struct dm_cache_metadata *cmd;

	/*
	 * Metadata is written to this device.
	 */
	struct dm_dev *metadata_dev;

	/*
	 * The slower of the two data devices. Typically a spindle.
	 */
	struct dm_dev *origin_dev;

	/*
	 * The faster of the two data devices. Typically an SSD.
	 */
	struct dm_dev *cache_dev;

	/*
	 * Size of the origin device in _complete_ blocks and native sectors.
	 */
	dm_oblock_t origin_blocks;
	sector_t origin_sectors;

	/*
	 * Size of the cache device in blocks.
	 */
	dm_cblock_t cache_size;

	/*
	 * Fields for converting from sectors to blocks.
	 */
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_writethrough_bios;
	struct list_head quiesced_migrations;
	struct list_head completed_migrations;
	struct list_head need_commit_migrations;
	sector_t migration_threshold;
	wait_queue_head_t migration_wait;
	atomic_t nr_allocated_migrations;

	/*
	 * The number of in flight migrations that are performing
	 * background io. eg, promotion, writeback.
	 */
	atomic_t nr_io_migrations;

	wait_queue_head_t quiescing_wait;
	atomic_t quiescing;
	atomic_t quiescing_ack;

	/*
	 * cache_size entries, dirty if set
	 */
	atomic_t nr_dirty;
	unsigned long *dirty_bitset;

	/*
	 * origin_blocks entries, discarded if set.
	 */
	dm_dblock_t discard_nr_blocks;
	unsigned long *discard_bitset;
	uint32_t discard_block_size; /* a power of 2 times sectors per block */

	/*
	 * Rather than reconstructing the table line for the status we just
	 * save it and regurgitate.
	 */
	unsigned nr_ctr_args;
	const char **ctr_args;

	struct dm_kcopyd_client *copier;
	struct workqueue_struct *wq;
	struct work_struct worker;

	struct delayed_work waker;
	unsigned long last_commit_jiffies;

	struct dm_bio_prison *prison;
	struct dm_deferred_set *all_io_ds;

	mempool_t *migration_pool;

	struct dm_cache_policy *policy;
	unsigned policy_nr_args;

	bool need_tick_bio:1;
	bool sized:1;
	bool invalidate:1;
	bool commit_requested:1;
	bool loaded_mappings:1;
	bool loaded_discards:1;

	/*
	 * Cache features such as write-through.
	 */
	struct cache_features features;

	struct cache_stats stats;

	/*
	 * Invalidation fields.
	 */
	spinlock_t invalidation_lock;
	struct list_head invalidation_requests;
};
struct per_bio_data {
	bool tick:1;
	unsigned req_nr:2;
	struct dm_deferred_entry *all_io_entry;
	struct dm_hook_info hook_info;

	/*
	 * writethrough fields. These MUST remain at the end of this
	 * structure and the 'cache' member must be the first as it
	 * is used to determine the offset of the writethrough fields.
	 */
	struct cache *cache;
	dm_cblock_t cblock;
	struct dm_bio_details bio_details;
};

struct dm_cache_migration {
	struct list_head list;
	struct cache *cache;

	unsigned long start_jiffies;
	dm_oblock_t old_oblock;
	dm_oblock_t new_oblock;
	dm_cblock_t cblock;

	bool err:1;
	bool discard:1;
	bool writeback:1;
	bool demote:1;
	bool promote:1;
	bool requeue_holder:1;
	bool invalidate:1;

	struct dm_bio_prison_cell *old_ocell;
	struct dm_bio_prison_cell *new_ocell;
};
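/*
 * The flag bits above select the operation a migration performs; the
 * constructors further down (promote(), writeback(), demote_then_promote(),
 * invalidate() and discard()) each set exactly one combination:
 *
 *	promote:		promote = 1
 *	writeback:		writeback = 1
 *	demote then promote:	demote = 1, promote = 1
 *	invalidate:		demote = 1, invalidate = 1
 *	discard:		discard = 1
 */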
/*
 * Processing a bio in the worker thread may require these memory
 * allocations. We prealloc to avoid deadlocks (the same worker thread
 * frees them back to the mempool).
 */
struct prealloc {
	struct dm_cache_migration *mg;
	struct dm_bio_prison_cell *cell1;
	struct dm_bio_prison_cell *cell2;
};

static void wake_worker(struct cache *cache)
{
	queue_work(cache->wq, &cache->worker);
}

/*----------------------------------------------------------------*/

static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
{
	/* FIXME: change to use a local slab. */
	return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
}

static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
{
	dm_bio_prison_free_cell(cache->prison, cell);
}

static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
	struct dm_cache_migration *mg;

	mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
	if (mg) {
		mg->cache = cache;
		atomic_inc(&mg->cache->nr_allocated_migrations);
	}

	return mg;
}

static void free_migration(struct dm_cache_migration *mg)
{
	if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
		wake_up(&mg->cache->migration_wait);

	mempool_free(mg, mg->cache->migration_pool);
}

static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
{
	if (!p->mg) {
		p->mg = alloc_migration(cache);
		if (!p->mg)
			return -ENOMEM;
	}

	if (!p->cell1) {
		p->cell1 = alloc_prison_cell(cache);
		if (!p->cell1)
			return -ENOMEM;
	}

	if (!p->cell2) {
		p->cell2 = alloc_prison_cell(cache);
		if (!p->cell2)
			return -ENOMEM;
	}

	return 0;
}

static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
{
	if (p->cell2)
		free_prison_cell(cache, p->cell2);

	if (p->cell1)
		free_prison_cell(cache, p->cell1);

	if (p->mg)
		free_migration(p->mg);
}

static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
{
	struct dm_cache_migration *mg = p->mg;

	BUG_ON(!mg);
	p->mg = NULL;

	return mg;
}

/*
 * You must have a cell within the prealloc struct to return. If not this
 * function will BUG() rather than returning NULL.
 */
static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
{
	struct dm_bio_prison_cell *r = NULL;

	if (p->cell1) {
		r = p->cell1;
		p->cell1 = NULL;

	} else if (p->cell2) {
		r = p->cell2;
		p->cell2 = NULL;
	} else
		BUG();

	return r;
}

/*
 * You can't have more than two cells in a prealloc struct. BUG() will be
 * called if you try and overfill.
 */
static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
{
	if (!p->cell2)
		p->cell2 = cell;

	else if (!p->cell1)
		p->cell1 = cell;

	else
		BUG();
}

/*----------------------------------------------------------------*/
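/*
 * Bios are "detained" in prison cells keyed by the oblock range they
 * touch; any later bio that hits an overlapping key is queued in the
 * cell until the holder releases it (see cell_defer() below, which
 * releases waiters onto the deferred_bios list).
 */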
static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = 0;
	key->block_begin = from_oblock(begin);
	key->block_end = from_oblock(end);
}

/*
 * The caller hands in a preallocated cell, and a free function for it.
 * The cell will be freed if there's an error, or if it wasn't used because
 * a cell with that key already exists.
 */
typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);

static int bio_detain_range(struct cache *cache, dm_oblock_t oblock_begin, dm_oblock_t oblock_end,
			    struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
			    cell_free_fn free_fn, void *free_context,
			    struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;

	build_key(oblock_begin, oblock_end, &key);
	r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
	if (r)
		free_fn(free_context, cell_prealloc);

	return r;
}

static int bio_detain(struct cache *cache, dm_oblock_t oblock,
		      struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
		      cell_free_fn free_fn, void *free_context,
		      struct dm_bio_prison_cell **cell_result)
{
	dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
	return bio_detain_range(cache, oblock, end, bio,
				cell_prealloc, free_fn, free_context, cell_result);
}

static int get_cell(struct cache *cache,
		    dm_oblock_t oblock,
		    struct prealloc *structs,
		    struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;
	struct dm_bio_prison_cell *cell_prealloc;

	cell_prealloc = prealloc_get_cell(structs);

	build_key(oblock, to_oblock(from_oblock(oblock) + 1ULL), &key);
	r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
	if (r)
		prealloc_put_cell(structs, cell_prealloc);

	return r;
}

/*----------------------------------------------------------------*/
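/*
 * Dirty-bit tracking. nr_dirty mirrors the number of set bits so the
 * "all clean" transition can raise a table event without scanning the
 * bitset.
 */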
static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
	return test_bit(from_cblock(b), cache->dirty_bitset);
}

static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
		atomic_inc(&cache->nr_dirty);
		policy_set_dirty(cache->policy, oblock);
	}
}

static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
		policy_clear_dirty(cache->policy, oblock);
		if (atomic_dec_return(&cache->nr_dirty) == 0)
			dm_table_event(cache->ti->table);
	}
}

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
{
	return cache->sectors_per_block_shift >= 0;
}

/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
__always_inline
#endif
static dm_block_t block_div(dm_block_t b, uint32_t n)
{
	do_div(b, n);

	return b;
}

static dm_block_t oblocks_per_dblock(struct cache *cache)
{
	dm_block_t oblocks = cache->discard_block_size;

	if (block_size_is_power_of_two(cache))
		oblocks >>= cache->sectors_per_block_shift;
	else
		oblocks = block_div(oblocks, cache->sectors_per_block);

	return oblocks;
}

static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{
	return to_dblock(block_div(from_oblock(oblock),
				   oblocks_per_dblock(cache)));
}

static dm_oblock_t dblock_to_oblock(struct cache *cache, dm_dblock_t dblock)
{
	return to_oblock(from_dblock(dblock) * oblocks_per_dblock(cache));
}
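/*
 * For example: with sectors_per_block = 128 (64KB cache blocks) and
 * discard_block_size = 1024 sectors, oblocks_per_dblock() is 8, so
 * oblock 20 maps to dblock 2, and dblock 2 maps back to oblock 16.
 */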
static void set_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
	atomic_inc(&cache->stats.discard_count);

	spin_lock_irqsave(&cache->lock, flags);
	set_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void clear_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	clear_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
		     cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

/*----------------------------------------------------------------*/

static void load_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	dm_cache_metadata_get_stats(cache->cmd, &stats);
	atomic_set(&cache->stats.read_hit, stats.read_hits);
	atomic_set(&cache->stats.read_miss, stats.read_misses);
	atomic_set(&cache->stats.write_hit, stats.write_hits);
	atomic_set(&cache->stats.write_miss, stats.write_misses);
}

static void save_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	stats.read_hits = atomic_read(&cache->stats.read_hit);
	stats.read_misses = atomic_read(&cache->stats.read_miss);
	stats.write_hits = atomic_read(&cache->stats.write_hit);
	stats.write_misses = atomic_read(&cache->stats.write_miss);

	dm_cache_metadata_set_stats(cache->cmd, &stats);
}

/*----------------------------------------------------------------
 * Per bio data
 *--------------------------------------------------------------*/

/*
 * If using writeback, leave out struct per_bio_data's writethrough fields.
 */
#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))

static bool writethrough_mode(struct cache_features *f)
{
	return f->io_mode == CM_IO_WRITETHROUGH;
}

static bool writeback_mode(struct cache_features *f)
{
	return f->io_mode == CM_IO_WRITEBACK;
}

static bool passthrough_mode(struct cache_features *f)
{
	return f->io_mode == CM_IO_PASSTHROUGH;
}

static size_t get_per_bio_data_size(struct cache *cache)
{
	return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}

static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
	BUG_ON(!pb);
	return pb;
}

static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = get_per_bio_data(bio, data_size);

	pb->tick = false;
	pb->req_nr = dm_bio_get_target_bio_nr(bio);
	pb->all_io_entry = NULL;

	return pb;
}

/*----------------------------------------------------------------
 * Remapping
 *--------------------------------------------------------------*/
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
	bio->bi_bdev = cache->origin_dev->bdev;
}

static void remap_to_cache(struct cache *cache, struct bio *bio,
			   dm_cblock_t cblock)
{
	sector_t bi_sector = bio->bi_iter.bi_sector;
	sector_t block = from_cblock(cblock);

	bio->bi_bdev = cache->cache_dev->bdev;
	if (!block_size_is_power_of_two(cache))
		bio->bi_iter.bi_sector =
			(block * cache->sectors_per_block) +
			sector_div(bi_sector, cache->sectors_per_block);
	else
		bio->bi_iter.bi_sector =
			(block << cache->sectors_per_block_shift) |
			(bi_sector & (cache->sectors_per_block - 1));
}
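/*
 * For example: with sectors_per_block = 128 (shift 7), a bio at origin
 * sector 1000 being remapped to cblock 3 lands at cache sector
 * (3 << 7) | (1000 & 127) = 384 + 104 = 488; the offset within the
 * block (104) is preserved.
 */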
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	spin_lock_irqsave(&cache->lock, flags);
	if (cache->need_tick_bio &&
	    !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
		pb->tick = true;
		cache->need_tick_bio = false;
	}
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
					  dm_oblock_t oblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_origin(cache, bio);
	if (bio_data_dir(bio) == WRITE)
		clear_discard(cache, oblock_to_dblock(cache, oblock));
}

static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
				 dm_oblock_t oblock, dm_cblock_t cblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_cache(cache, bio, cblock);
	if (bio_data_dir(bio) == WRITE) {
		set_dirty(cache, oblock, cblock);
		clear_discard(cache, oblock_to_dblock(cache, oblock));
	}
}

static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (!block_size_is_power_of_two(cache))
		(void) sector_div(block_nr, cache->sectors_per_block);
	else
		block_nr >>= cache->sectors_per_block_shift;

	return to_oblock(block_nr);
}

static int bio_triggers_commit(struct cache *cache, struct bio *bio)
{
	return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
}

/*
 * You must increment the deferred set whilst the prison cell is held. To
 * encourage this, we ask for 'cell' to be passed in.
 */
static void inc_ds(struct cache *cache, struct bio *bio,
		   struct dm_bio_prison_cell *cell)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	BUG_ON(!cell);
	BUG_ON(pb->all_io_entry);

	pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
}

static void issue(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	if (!bio_triggers_commit(cache, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in do_worker().
	 */
	spin_lock_irqsave(&cache->lock, flags);
	cache->commit_requested = true;
	bio_list_add(&cache->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell)
{
	inc_ds(cache, bio, cell);
	issue(cache, bio);
}

static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_writethrough_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void writethrough_endio(struct bio *bio, int err)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	dm_unhook_bio(&pb->hook_info, bio);

	if (err) {
		bio_endio(bio, err);
		return;
	}

	dm_bio_restore(&pb->bio_details, bio);
	remap_to_cache(pb->cache, bio, pb->cblock);

	/*
	 * We can't issue this bio directly, since we're in interrupt
	 * context. So it gets put on a bio list for processing by the
	 * worker thread.
	 */
	defer_writethrough_bio(pb->cache, bio);
}

/*
 * When running in writethrough mode we need to send writes to clean blocks
 * to both the cache and origin devices. In future we'd like to clone the
 * bio and send them in parallel, but for now we're doing them in
 * series as this is easier.
 */
static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
				       dm_oblock_t oblock, dm_cblock_t cblock)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	pb->cache = cache;
	pb->cblock = cblock;
	dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
	dm_bio_record(&pb->bio_details, bio);

	remap_to_origin_clear_discard(pb->cache, bio, oblock);
}
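/*
 * The resulting writethrough sequence is:
 *
 *	1. hook the bio's endio, record its details and send it to the
 *	   origin;
 *	2. on completion, writethrough_endio() restores the bio and
 *	   remaps it to the cache device;
 *	3. the worker thread reissues it (we can't do so from interrupt
 *	   context), and the original endio runs once the cache write
 *	   completes.
 */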
/*----------------------------------------------------------------
 * Migration processing
 *
 * Migration covers moving data from the origin device to the cache, or
 * vice versa.
 *--------------------------------------------------------------*/
static void inc_io_migrations(struct cache *cache)
{
	atomic_inc(&cache->nr_io_migrations);
}

static void dec_io_migrations(struct cache *cache)
{
	atomic_dec(&cache->nr_io_migrations);
}

static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
			 bool holder)
{
	(holder ? dm_cell_release : dm_cell_release_no_holder)
		(cache->prison, cell, &cache->deferred_bios);
	free_prison_cell(cache, cell);
}

static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
		       bool holder)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	__cell_defer(cache, cell, holder);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void free_io_migration(struct dm_cache_migration *mg)
{
	dec_io_migrations(mg->cache);
	free_migration(mg);
}

static void migration_failure(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		DMWARN_LIMIT("writeback failed; couldn't copy block");
		set_dirty(cache, mg->old_oblock, mg->cblock);
		cell_defer(cache, mg->old_ocell, false);

	} else if (mg->demote) {
		DMWARN_LIMIT("demotion failed; couldn't copy block");
		policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);

		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
		if (mg->promote)
			cell_defer(cache, mg->new_ocell, true);
	} else {
		DMWARN_LIMIT("promotion failed; couldn't copy block");
		policy_remove_mapping(cache->policy, mg->new_oblock);
		cell_defer(cache, mg->new_ocell, true);
	}

	free_io_migration(mg);
}

static void migration_success_pre_commit(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		clear_dirty(cache, mg->old_oblock, mg->cblock);
		cell_defer(cache, mg->old_ocell, false);
		free_io_migration(mg);
		return;

	} else if (mg->demote) {
		if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
			DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
			policy_force_mapping(cache->policy, mg->new_oblock,
					     mg->old_oblock);
			if (mg->promote)
				cell_defer(cache, mg->new_ocell, true);
			free_io_migration(mg);
			return;
		}
	} else {
		if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
			DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
			policy_remove_mapping(cache->policy, mg->new_oblock);
			free_io_migration(mg);
			return;
		}
	}

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->need_commit_migrations);
	cache->commit_requested = true;
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void migration_success_post_commit(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		DMWARN("writeback unexpectedly triggered commit");
		return;

	} else if (mg->demote) {
		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);

		if (mg->promote) {
			mg->demote = false;

			spin_lock_irqsave(&cache->lock, flags);
			list_add_tail(&mg->list, &cache->quiesced_migrations);
			spin_unlock_irqrestore(&cache->lock, flags);

		} else {
			if (mg->invalidate)
				policy_remove_mapping(cache->policy, mg->old_oblock);
			free_io_migration(mg);
		}

	} else {
		if (mg->requeue_holder) {
			clear_dirty(cache, mg->new_oblock, mg->cblock);
			cell_defer(cache, mg->new_ocell, true);
		} else {
			/*
			 * The block was promoted via an overwrite, so it's dirty.
			 */
			set_dirty(cache, mg->new_oblock, mg->cblock);
			bio_endio(mg->new_ocell->holder, 0);
			cell_defer(cache, mg->new_ocell, false);
		}
		free_io_migration(mg);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
	struct cache *cache = mg->cache;

	if (read_err || write_err)
		mg->err = true;

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->completed_migrations);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void issue_copy(struct dm_cache_migration *mg)
{
	int r;
	struct dm_io_region o_region, c_region;
	struct cache *cache = mg->cache;
	sector_t cblock = from_cblock(mg->cblock);

	o_region.bdev = cache->origin_dev->bdev;
	o_region.count = cache->sectors_per_block;

	c_region.bdev = cache->cache_dev->bdev;
	c_region.sector = cblock * cache->sectors_per_block;
	c_region.count = cache->sectors_per_block;

	if (mg->writeback || mg->demote) {
		/* demote */
		o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
	} else {
		/* promote */
		o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
	}

	if (r < 0) {
		DMERR_LIMIT("issuing migration failed");
		migration_failure(mg);
	}
}

static void overwrite_endio(struct bio *bio, int err)
{
	struct dm_cache_migration *mg = bio->bi_private;
	struct cache *cache = mg->cache;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
	unsigned long flags;

	dm_unhook_bio(&pb->hook_info, bio);

	if (err)
		mg->err = true;

	mg->requeue_holder = false;

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->completed_migrations);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(mg->cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
	remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);

	/*
	 * No need to inc_ds() here, since the cell will be held for the
	 * duration of the io.
	 */
	generic_make_request(bio);
}

static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}
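/*
 * A promotion triggered by a write that covers the whole block can skip
 * the kcopyd copy entirely: issue_overwrite() just redirects the write
 * to the cache block, since every byte will be overwritten anyway (see
 * issue_copy_or_discard() below).
 */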
static void avoid_copy(struct dm_cache_migration *mg)
{
	atomic_inc(&mg->cache->stats.copies_avoided);
	migration_success_pre_commit(mg);
}

static void calc_discard_block_range(struct cache *cache, struct bio *bio,
				     dm_dblock_t *b, dm_dblock_t *e)
{
	sector_t sb = bio->bi_iter.bi_sector;
	sector_t se = bio_end_sector(bio);

	*b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));

	if (se - sb < cache->discard_block_size)
		*e = *b;
	else
		*e = to_dblock(block_div(se, cache->discard_block_size));
}
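/*
 * Only whole discard blocks are marked. For example, with
 * discard_block_size = 1024 sectors, a discard of sectors [3000, 6000)
 * gives b = 3 and e = 5, i.e. dblocks 3 and 4; the partial blocks at
 * either end are left alone.
 */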
static void issue_discard(struct dm_cache_migration *mg)
{
	dm_dblock_t b, e;
	struct bio *bio = mg->new_ocell->holder;

	calc_discard_block_range(mg->cache, bio, &b, &e);
	while (b != e) {
		set_discard(mg->cache, b);
		b = to_dblock(from_dblock(b) + 1);
	}

	bio_endio(bio, 0);
	cell_defer(mg->cache, mg->new_ocell, false);
	free_migration(mg);
}

static void issue_copy_or_discard(struct dm_cache_migration *mg)
{
	bool avoid;
	struct cache *cache = mg->cache;

	if (mg->discard) {
		issue_discard(mg);
		return;
	}

	if (mg->writeback || mg->demote)
		avoid = !is_dirty(cache, mg->cblock) ||
			is_discarded_oblock(cache, mg->old_oblock);
	else {
		struct bio *bio = mg->new_ocell->holder;

		avoid = is_discarded_oblock(cache, mg->new_oblock);

		if (writeback_mode(&cache->features) &&
		    !avoid && bio_writes_complete_block(cache, bio)) {
			issue_overwrite(mg, bio);
			return;
		}
	}

	avoid ? avoid_copy(mg) : issue_copy(mg);
}

static void complete_migration(struct dm_cache_migration *mg)
{
	if (mg->err)
		migration_failure(mg);
	else
		migration_success_pre_commit(mg);
}

static void process_migrations(struct cache *cache, struct list_head *head,
			       void (*fn)(struct dm_cache_migration *))
{
	unsigned long flags;
	struct list_head list;
	struct dm_cache_migration *mg, *tmp;

	INIT_LIST_HEAD(&list);
	spin_lock_irqsave(&cache->lock, flags);
	list_splice_init(head, &list);
	spin_unlock_irqrestore(&cache->lock, flags);

	list_for_each_entry_safe(mg, tmp, &list, list)
		fn(mg);
}

static void __queue_quiesced_migration(struct dm_cache_migration *mg)
{
	list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
}

static void queue_quiesced_migration(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	spin_lock_irqsave(&cache->lock, flags);
	__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
{
	unsigned long flags;
	struct dm_cache_migration *mg, *tmp;

	spin_lock_irqsave(&cache->lock, flags);
	list_for_each_entry_safe(mg, tmp, work, list)
		__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void check_for_quiesced_migrations(struct cache *cache,
					  struct per_bio_data *pb)
{
	struct list_head work;

	if (!pb->all_io_entry)
		return;

	INIT_LIST_HEAD(&work);
	dm_deferred_entry_dec(pb->all_io_entry, &work);

	if (!list_empty(&work))
		queue_quiesced_migrations(cache, &work);
}

static void quiesce_migration(struct dm_cache_migration *mg)
{
	if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
		queue_quiesced_migration(mg);
}
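/*
 * In other words, a new migration is parked on the all_io deferred set
 * until every bio that was in flight when it was created has completed
 * (see check_for_quiesced_migrations() above); only then is it queued
 * for the worker to start copying.
 */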
static void promote(struct cache *cache, struct prealloc *structs,
		    dm_oblock_t oblock, dm_cblock_t cblock,
		    struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = false;
	mg->writeback = false;
	mg->demote = false;
	mg->promote = true;
	mg->requeue_holder = true;
	mg->invalidate = false;
	mg->cache = cache;
	mg->new_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = NULL;
	mg->new_ocell = cell;
	mg->start_jiffies = jiffies;

	inc_io_migrations(cache);
	quiesce_migration(mg);
}

static void writeback(struct cache *cache, struct prealloc *structs,
		      dm_oblock_t oblock, dm_cblock_t cblock,
		      struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = false;
	mg->writeback = true;
	mg->demote = false;
	mg->promote = false;
	mg->requeue_holder = true;
	mg->invalidate = false;
	mg->cache = cache;
	mg->old_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = cell;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;

	inc_io_migrations(cache);
	quiesce_migration(mg);
}

static void demote_then_promote(struct cache *cache, struct prealloc *structs,
				dm_oblock_t old_oblock, dm_oblock_t new_oblock,
				dm_cblock_t cblock,
				struct dm_bio_prison_cell *old_ocell,
				struct dm_bio_prison_cell *new_ocell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = false;
	mg->writeback = false;
	mg->demote = true;
	mg->promote = true;
	mg->requeue_holder = true;
	mg->invalidate = false;
	mg->cache = cache;
	mg->old_oblock = old_oblock;
	mg->new_oblock = new_oblock;
	mg->cblock = cblock;
	mg->old_ocell = old_ocell;
	mg->new_ocell = new_ocell;
	mg->start_jiffies = jiffies;

	inc_io_migrations(cache);
	quiesce_migration(mg);
}

/*
 * Invalidate a cache entry. No writeback occurs; any changes in the cache
 * block are thrown away.
 */
static void invalidate(struct cache *cache, struct prealloc *structs,
		       dm_oblock_t oblock, dm_cblock_t cblock,
		       struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = false;
	mg->writeback = false;
	mg->demote = true;
	mg->promote = false;
	mg->requeue_holder = true;
	mg->invalidate = true;
	mg->cache = cache;
	mg->old_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = cell;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;

	inc_io_migrations(cache);
	quiesce_migration(mg);
}

static void discard(struct cache *cache, struct prealloc *structs,
		    struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = true;
	mg->writeback = false;
	mg->demote = false;
	mg->promote = false;
	mg->requeue_holder = false;
	mg->invalidate = false;
	mg->cache = cache;
	mg->old_ocell = NULL;
	mg->new_ocell = cell;
	mg->start_jiffies = jiffies;

	quiesce_migration(mg);
}

/*----------------------------------------------------------------
 * bio processing
 *--------------------------------------------------------------*/
static void defer_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void process_flush_bio(struct cache *cache, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	BUG_ON(bio->bi_iter.bi_size);
	if (!pb->req_nr)
		remap_to_origin(cache, bio);
	else
		remap_to_cache(cache, bio, 0);

	/*
	 * REQ_FLUSH is not directed at any particular block so we don't
	 * need to inc_ds(). REQ_FUA's are split into a write + REQ_FLUSH
	 * by dm-core.
	 */
	issue(cache, bio);
}

static void process_discard_bio(struct cache *cache, struct prealloc *structs,
				struct bio *bio)
{
	int r;
	dm_dblock_t b, e;
	struct dm_bio_prison_cell *cell_prealloc, *new_ocell;

	calc_discard_block_range(cache, bio, &b, &e);
	if (b == e) {
		bio_endio(bio, 0);
		return;
	}

	cell_prealloc = prealloc_get_cell(structs);
	r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prealloc,
			     (cell_free_fn) prealloc_put_cell,
			     structs, &new_ocell);
	if (r > 0)
		return;

	discard(cache, structs, new_ocell);
}

static bool spare_migration_bandwidth(struct cache *cache)
{
	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
		cache->sectors_per_block;

	return current_volume < cache->migration_threshold;
}
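/*
 * For example, with migration_threshold = 2048 sectors and 128-sector
 * blocks, a new migration is allowed while at most 14 IO migrations are
 * already in flight ((14 + 1) * 128 = 1920 < 2048).
 */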
  1172. static void inc_hit_counter(struct cache *cache, struct bio *bio)
  1173. {
  1174. atomic_inc(bio_data_dir(bio) == READ ?
  1175. &cache->stats.read_hit : &cache->stats.write_hit);
  1176. }
  1177. static void inc_miss_counter(struct cache *cache, struct bio *bio)
  1178. {
  1179. atomic_inc(bio_data_dir(bio) == READ ?
  1180. &cache->stats.read_miss : &cache->stats.write_miss);
  1181. }
  1182. static void process_bio(struct cache *cache, struct prealloc *structs,
  1183. struct bio *bio)
  1184. {
  1185. int r;
  1186. bool release_cell = true;
  1187. dm_oblock_t block = get_bio_block(cache, bio);
  1188. struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
  1189. struct policy_result lookup_result;
  1190. bool passthrough = passthrough_mode(&cache->features);
  1191. bool discarded_block, can_migrate;
  1192. /*
  1193. * Check to see if that block is currently migrating.
  1194. */
  1195. cell_prealloc = prealloc_get_cell(structs);
  1196. r = bio_detain(cache, block, bio, cell_prealloc,
  1197. (cell_free_fn) prealloc_put_cell,
  1198. structs, &new_ocell);
  1199. if (r > 0)
  1200. return;
  1201. discarded_block = is_discarded_oblock(cache, block);
  1202. can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
  1203. r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
  1204. bio, &lookup_result);
  1205. if (r == -EWOULDBLOCK)
  1206. /* migration has been denied */
  1207. lookup_result.op = POLICY_MISS;
  1208. switch (lookup_result.op) {
  1209. case POLICY_HIT:
  1210. if (passthrough) {
  1211. inc_miss_counter(cache, bio);
  1212. /*
  1213. * Passthrough always maps to the origin,
  1214. * invalidating any cache blocks that are written
  1215. * to.
  1216. */
  1217. if (bio_data_dir(bio) == WRITE) {
  1218. atomic_inc(&cache->stats.demotion);
  1219. invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
  1220. release_cell = false;
  1221. } else {
  1222. /* FIXME: factor out issue_origin() */
  1223. remap_to_origin_clear_discard(cache, bio, block);
  1224. inc_and_issue(cache, bio, new_ocell);
  1225. }
  1226. } else {
  1227. inc_hit_counter(cache, bio);
  1228. if (bio_data_dir(bio) == WRITE &&
  1229. writethrough_mode(&cache->features) &&
  1230. !is_dirty(cache, lookup_result.cblock)) {
  1231. remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
  1232. inc_and_issue(cache, bio, new_ocell);
  1233. } else {
  1234. remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
  1235. inc_and_issue(cache, bio, new_ocell);
  1236. }
  1237. }
  1238. break;
  1239. case POLICY_MISS:
  1240. inc_miss_counter(cache, bio);
  1241. remap_to_origin_clear_discard(cache, bio, block);
  1242. inc_and_issue(cache, bio, new_ocell);
  1243. break;
  1244. case POLICY_NEW:
  1245. atomic_inc(&cache->stats.promotion);
  1246. promote(cache, structs, block, lookup_result.cblock, new_ocell);
  1247. release_cell = false;
  1248. break;
  1249. case POLICY_REPLACE:
  1250. cell_prealloc = prealloc_get_cell(structs);
  1251. r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
  1252. (cell_free_fn) prealloc_put_cell,
  1253. structs, &old_ocell);
  1254. if (r > 0) {
  1255. /*
  1256. * We have to be careful to avoid lock inversion of
  1257. * the cells. So we back off, and wait for the
  1258. * old_ocell to become free.
  1259. */
  1260. policy_force_mapping(cache->policy, block,
  1261. lookup_result.old_oblock);
  1262. atomic_inc(&cache->stats.cache_cell_clash);
  1263. break;
  1264. }
  1265. atomic_inc(&cache->stats.demotion);
  1266. atomic_inc(&cache->stats.promotion);
  1267. demote_then_promote(cache, structs, lookup_result.old_oblock,
  1268. block, lookup_result.cblock,
  1269. old_ocell, new_ocell);
  1270. release_cell = false;
  1271. break;
  1272. default:
  1273. DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
  1274. (unsigned) lookup_result.op);
  1275. bio_io_error(bio);
  1276. }
  1277. if (release_cell)
  1278. cell_defer(cache, new_ocell, false);
  1279. }
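
/*
 * Returns true if more than COMMIT_PERIOD jiffies have elapsed since the
 * last metadata commit.
 */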
static int need_commit_due_to_time(struct cache *cache)
{
	return !time_in_range(jiffies, cache->last_commit_jiffies,
			      cache->last_commit_jiffies + COMMIT_PERIOD);
}

static int commit_if_needed(struct cache *cache)
{
	int r = 0;

	if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
	    dm_cache_changed_this_transaction(cache->cmd)) {
		atomic_inc(&cache->stats.commit_count);
		cache->commit_requested = false;
		r = dm_cache_commit(cache->cmd, false);
		cache->last_commit_jiffies = jiffies;
	}

	return r;
}
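
/*
 * Take the deferred bios off the cache under the spinlock, then process
 * them with it released.  If the preallocated migration structures run
 * out, the remaining bios are pushed back onto the deferred list and we
 * retry on a later worker iteration.
 */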
static void process_deferred_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;
	struct prealloc structs;

	memset(&structs, 0, sizeof(structs));
	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while (!bio_list_empty(&bios)) {
		/*
		 * If we've got no free migration structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (prealloc_data_structs(cache, &structs)) {
			spin_lock_irqsave(&cache->lock, flags);
			bio_list_merge(&cache->deferred_bios, &bios);
			spin_unlock_irqrestore(&cache->lock, flags);
			break;
		}

		bio = bio_list_pop(&bios);

		if (bio->bi_rw & REQ_FLUSH)
			process_flush_bio(cache, bio);
		else if (bio->bi_rw & REQ_DISCARD)
			process_discard_bio(cache, &structs, bio);
		else
			process_bio(cache, &structs, bio);
	}

	prealloc_free_structs(cache, &structs);
}

static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_flush_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	/*
	 * These bios have already been through inc_ds()
	 */
	while ((bio = bio_list_pop(&bios)))
		submit_bios ? generic_make_request(bio) : bio_io_error(bio);
}

static void process_deferred_writethrough_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_writethrough_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	/*
	 * These bios have already been through inc_ds()
	 */
	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}

static void writeback_some_dirty_blocks(struct cache *cache)
{
	int r = 0;
	dm_oblock_t oblock;
	dm_cblock_t cblock;
	struct prealloc structs;
	struct dm_bio_prison_cell *old_ocell;

	memset(&structs, 0, sizeof(structs));

	while (spare_migration_bandwidth(cache)) {
		if (prealloc_data_structs(cache, &structs))
			break;

		r = policy_writeback_work(cache->policy, &oblock, &cblock);
		if (r)
			break;

		r = get_cell(cache, oblock, &structs, &old_ocell);
		if (r) {
			policy_set_dirty(cache->policy, oblock);
			break;
		}

		writeback(cache, &structs, oblock, cblock, old_ocell);
	}

	prealloc_free_structs(cache, &structs);
}
/*----------------------------------------------------------------
 * Invalidations.
 * Dropping something from the cache *without* writing back.
 *--------------------------------------------------------------*/

static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
{
	int r = 0;
	uint64_t begin = from_cblock(req->cblocks->begin);
	uint64_t end = from_cblock(req->cblocks->end);

	while (begin != end) {
		r = policy_remove_cblock(cache->policy, to_cblock(begin));
		if (!r) {
			r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
			if (r)
				break;
		} else if (r == -ENODATA) {
			/* harmless, already unmapped */
			r = 0;
		} else {
			DMERR("policy_remove_cblock failed");
			break;
		}

		begin++;
	}

	cache->commit_requested = true;

	req->err = r;
	atomic_set(&req->complete, 1);

	wake_up(&req->result_wait);
}

static void process_invalidation_requests(struct cache *cache)
{
	struct list_head list;
	struct invalidation_request *req, *tmp;

	INIT_LIST_HEAD(&list);
	spin_lock(&cache->invalidation_lock);
	list_splice_init(&cache->invalidation_requests, &list);
	spin_unlock(&cache->invalidation_lock);

	list_for_each_entry_safe (req, tmp, &list, list)
		process_invalidation_request(cache, req);
}

/*----------------------------------------------------------------
 * Main worker loop
 *--------------------------------------------------------------*/
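
/*
 * Quiescing is a two-way handshake: the suspend path raises 'quiescing'
 * and waits; the worker acknowledges by setting 'quiescing_ack' once it
 * has stopped taking on new work.
 */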
static bool is_quiescing(struct cache *cache)
{
	return atomic_read(&cache->quiescing);
}

static void ack_quiescing(struct cache *cache)
{
	if (is_quiescing(cache)) {
		atomic_inc(&cache->quiescing_ack);
		wake_up(&cache->quiescing_wait);
	}
}

static void wait_for_quiescing_ack(struct cache *cache)
{
	wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
}

static void start_quiescing(struct cache *cache)
{
	atomic_inc(&cache->quiescing);
	wait_for_quiescing_ack(cache);
}

static void stop_quiescing(struct cache *cache)
{
	atomic_set(&cache->quiescing, 0);
	atomic_set(&cache->quiescing_ack, 0);
}

static void wait_for_migrations(struct cache *cache)
{
	wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
}

static void stop_worker(struct cache *cache)
{
	cancel_delayed_work(&cache->waker);
	flush_workqueue(cache->wq);
}

static void requeue_deferred_io(struct cache *cache)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static int more_work(struct cache *cache)
{
	if (is_quiescing(cache))
		return !list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations);
	else
		return !bio_list_empty(&cache->deferred_bios) ||
			!bio_list_empty(&cache->deferred_flush_bios) ||
			!bio_list_empty(&cache->deferred_writethrough_bios) ||
			!list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations) ||
			cache->invalidate;
}
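
/*
 * The main worker: processes deferred bios and migrations, commits
 * metadata when needed, and loops while more_work() reports outstanding
 * items.  While quiescing, only in-flight migrations are progressed.
 */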
static void do_worker(struct work_struct *ws)
{
	struct cache *cache = container_of(ws, struct cache, worker);

	do {
		if (!is_quiescing(cache)) {
			writeback_some_dirty_blocks(cache);
			process_deferred_writethrough_bios(cache);
			process_deferred_bios(cache);
			process_invalidation_requests(cache);
		}

		process_migrations(cache, &cache->quiesced_migrations, issue_copy_or_discard);
		process_migrations(cache, &cache->completed_migrations, complete_migration);

		if (commit_if_needed(cache)) {
			process_deferred_flush_bios(cache, false);
			process_migrations(cache, &cache->need_commit_migrations, migration_failure);

			/*
			 * FIXME: rollback metadata or just go into a
			 * failure mode and error everything
			 */
		} else {
			process_deferred_flush_bios(cache, true);
			process_migrations(cache, &cache->need_commit_migrations,
					   migration_success_post_commit);
		}

		ack_quiescing(cache);

	} while (more_work(cache));
}

/*
 * We want to commit periodically so that not too much
 * unwritten metadata builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);

	policy_tick(cache->policy);
	wake_worker(cache);
	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}

/*----------------------------------------------------------------*/

static int is_congested(struct dm_dev *dev, int bdi_bits)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return bdi_congested(&q->backing_dev_info, bdi_bits);
}

static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	struct cache *cache = container_of(cb, struct cache, callbacks);

	return is_congested(cache->origin_dev, bdi_bits) ||
		is_congested(cache->cache_dev, bdi_bits);
}

/*----------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------*/

/*
 * This function gets called on the error paths of the constructor, so we
 * have to cope with a partially initialised struct.
 */
static void destroy(struct cache *cache)
{
	unsigned i;

	if (cache->migration_pool)
		mempool_destroy(cache->migration_pool);

	if (cache->all_io_ds)
		dm_deferred_set_destroy(cache->all_io_ds);

	if (cache->prison)
		dm_bio_prison_destroy(cache->prison);

	if (cache->wq)
		destroy_workqueue(cache->wq);

	if (cache->dirty_bitset)
		free_bitset(cache->dirty_bitset);

	if (cache->discard_bitset)
		free_bitset(cache->discard_bitset);

	if (cache->copier)
		dm_kcopyd_client_destroy(cache->copier);

	if (cache->cmd)
		dm_cache_metadata_close(cache->cmd);

	if (cache->metadata_dev)
		dm_put_device(cache->ti, cache->metadata_dev);

	if (cache->origin_dev)
		dm_put_device(cache->ti, cache->origin_dev);

	if (cache->cache_dev)
		dm_put_device(cache->ti, cache->cache_dev);

	if (cache->policy)
		dm_cache_policy_destroy(cache->policy);

	for (i = 0; i < cache->nr_ctr_args; i++)
		kfree(cache->ctr_args[i]);
	kfree(cache->ctr_args);

	kfree(cache);
}

static void cache_dtr(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	destroy(cache);
}

static sector_t get_dev_size(struct dm_dev *dev)
{
	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}
/*----------------------------------------------------------------*/

/*
 * Construct a cache device mapping.
 *
 * cache <metadata dev> <cache dev> <origin dev> <block size>
 *       <#feature args> [<feature arg>]*
 *       <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev   : fast device holding the persistent metadata
 * cache dev      : fast device holding cached data blocks
 * origin dev     : slow device holding original data blocks
 * block size     : cache unit size in sectors
 *
 * #feature args  : number of feature arguments passed
 * feature args   : writethrough.  (The default is writeback.)
 *
 * policy         : the replacement policy to use
 * #policy args   : an even number of policy arguments corresponding
 *                  to key/value pairs passed to the policy
 * policy args    : key/value pairs passed to the policy
 *                  E.g. 'sequential_threshold 1024'
 *                  See cache-policies.txt for details.
 *
 * Optional feature arguments are:
 *   writethrough : write through caching that prohibits cache block
 *                  content from being different from origin block content.
 *                  Without this argument, the default behaviour is to write
 *                  back cache block contents later for performance reasons,
 *                  so they may differ from the corresponding origin blocks.
 */
struct cache_args {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;

	struct dm_dev *cache_dev;
	sector_t cache_sectors;

	struct dm_dev *origin_dev;
	sector_t origin_sectors;

	uint32_t block_size;

	const char *policy_name;
	int policy_argc;
	const char **policy_argv;

	struct cache_features features;
};

static void destroy_cache_args(struct cache_args *ca)
{
	if (ca->metadata_dev)
		dm_put_device(ca->ti, ca->metadata_dev);

	if (ca->cache_dev)
		dm_put_device(ca->ti, ca->cache_dev);

	if (ca->origin_dev)
		dm_put_device(ca->ti, ca->origin_dev);

	kfree(ca);
}

static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{
	if (!as->argc) {
		*error = "Insufficient args";
		return false;
	}

	return true;
}
static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
			      char **error)
{
	int r;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(ca->metadata_dev);
	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(ca->metadata_dev->bdev, b),
		       DM_CACHE_METADATA_MAX_SECTORS_WARNING);

	return 0;
}
static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
			   char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->cache_dev);
	if (r) {
		*error = "Error opening cache device";
		return r;
	}
	ca->cache_sectors = get_dev_size(ca->cache_dev);

	return 0;
}

static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->origin_dev);
	if (r) {
		*error = "Error opening origin device";
		return r;
	}

	ca->origin_sectors = get_dev_size(ca->origin_dev);
	if (ca->ti->len > ca->origin_sectors) {
		*error = "Device size larger than cached device";
		return -EINVAL;
	}

	return 0;
}

static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	unsigned long block_size;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
		*error = "Invalid data block size";
		return -EINVAL;
	}

	if (block_size > ca->cache_sectors) {
		*error = "Data block size is larger than the cache device";
		return -EINVAL;
	}

	ca->block_size = block_size;

	return 0;
}

static void init_features(struct cache_features *cf)
{
	cf->mode = CM_WRITE;
	cf->io_mode = CM_IO_WRITEBACK;
}

static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
			  char **error)
{
	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of cache feature arguments"},
	};

	int r;
	unsigned argc;
	const char *arg;
	struct cache_features *cf = &ca->features;

	init_features(cf);

	r = dm_read_arg_group(_args, as, &argc, error);
	if (r)
		return -EINVAL;

	while (argc--) {
		arg = dm_shift_arg(as);

		if (!strcasecmp(arg, "writeback"))
			cf->io_mode = CM_IO_WRITEBACK;

		else if (!strcasecmp(arg, "writethrough"))
			cf->io_mode = CM_IO_WRITETHROUGH;

		else if (!strcasecmp(arg, "passthrough"))
			cf->io_mode = CM_IO_PASSTHROUGH;

		else {
			*error = "Unrecognised cache feature requested";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
			char **error)
{
	static struct dm_arg _args[] = {
		{0, 1024, "Invalid number of policy arguments"},
	};

	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	ca->policy_name = dm_shift_arg(as);

	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
	if (r)
		return -EINVAL;

	ca->policy_argv = (const char **)as->argv;
	dm_consume_args(as, ca->policy_argc);

	return 0;
}

static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
			    char **error)
{
	int r;
	struct dm_arg_set as;

	as.argc = argc;
	as.argv = argv;

	r = parse_metadata_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_cache_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_origin_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_block_size(ca, &as, error);
	if (r)
		return r;

	r = parse_features(ca, &as, error);
	if (r)
		return r;

	r = parse_policy(ca, &as, error);
	if (r)
		return r;

	return 0;
}

/*----------------------------------------------------------------*/

static struct kmem_cache *migration_cache;

#define NOT_CORE_OPTION 1
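
/*
 * Core config keys are handled here; anything else returns
 * NOT_CORE_OPTION so the caller can offer the key to the policy.
 */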
static int process_config_option(struct cache *cache, const char *key, const char *value)
{
	unsigned long tmp;

	if (!strcasecmp(key, "migration_threshold")) {
		if (kstrtoul(value, 10, &tmp))
			return -EINVAL;

		cache->migration_threshold = tmp;
		return 0;
	}

	return NOT_CORE_OPTION;
}

static int set_config_value(struct cache *cache, const char *key, const char *value)
{
	int r = process_config_option(cache, key, value);

	if (r == NOT_CORE_OPTION)
		r = policy_set_config_value(cache->policy, key, value);

	if (r)
		DMWARN("bad config value for %s: %s", key, value);

	return r;
}

static int set_config_values(struct cache *cache, int argc, const char **argv)
{
	int r = 0;

	if (argc & 1) {
		DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
		return -EINVAL;
	}

	while (argc) {
		r = set_config_value(cache, argv[0], argv[1]);
		if (r)
			break;

		argc -= 2;
		argv += 2;
	}

	return r;
}

static int create_cache_policy(struct cache *cache, struct cache_args *ca,
			       char **error)
{
	struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
							   cache->cache_size,
							   cache->origin_sectors,
							   cache->sectors_per_block);
	if (IS_ERR(p)) {
		*error = "Error creating cache's policy";
		return PTR_ERR(p);
	}
	cache->policy = p;

	return 0;
}
/*
 * We want the discard block size to be at least the size of the cache
 * block size and have no more than 2^14 discard blocks across the origin.
 */
#define MAX_DISCARD_BLOCKS (1 << 14)

static bool too_many_discard_blocks(sector_t discard_block_size,
				    sector_t origin_size)
{
	(void) sector_div(origin_size, discard_block_size);

	return origin_size > MAX_DISCARD_BLOCKS;
}

static sector_t calculate_discard_block_size(sector_t cache_block_size,
					     sector_t origin_size)
{
	sector_t discard_block_size = cache_block_size;

	if (origin_size)
		while (too_many_discard_blocks(discard_block_size, origin_size))
			discard_block_size *= 2;

	return discard_block_size;
}

static void set_cache_size(struct cache *cache, dm_cblock_t size)
{
	dm_block_t nr_blocks = from_cblock(size);

	if (nr_blocks > (1 << 20) && cache->cache_size != size)
		DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
			     "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
			     "Please consider increasing the cache block size to reduce the overall cache block count.",
			     (unsigned long long) nr_blocks);

	cache->cache_size = size;
}
#define DEFAULT_MIGRATION_THRESHOLD 2048

static int cache_create(struct cache_args *ca, struct cache **result)
{
	int r = 0;
	char **error = &ca->ti->error;
	struct cache *cache;
	struct dm_target *ti = ca->ti;
	dm_block_t origin_blocks;
	struct dm_cache_metadata *cmd;
	bool may_format = ca->features.mode == CM_WRITE;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return -ENOMEM;

	cache->ti = ca->ti;
	ti->private = cache;
	ti->num_flush_bios = 2;
	ti->flush_supported = true;

	ti->num_discard_bios = 1;
	ti->discards_supported = true;
	ti->discard_zeroes_data_unsupported = true;
	ti->split_discard_bios = false;

	cache->features = ca->features;
	ti->per_bio_data_size = get_per_bio_data_size(cache);

	cache->callbacks.congested_fn = cache_is_congested;
	dm_table_add_target_callbacks(ti->table, &cache->callbacks);

	cache->metadata_dev = ca->metadata_dev;
	cache->origin_dev = ca->origin_dev;
	cache->cache_dev = ca->cache_dev;

	ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;

	/* FIXME: factor out this whole section */
	origin_blocks = cache->origin_sectors = ca->origin_sectors;
	origin_blocks = block_div(origin_blocks, ca->block_size);
	cache->origin_blocks = to_oblock(origin_blocks);

	cache->sectors_per_block = ca->block_size;
	if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
		r = -EINVAL;
		goto bad;
	}

	if (ca->block_size & (ca->block_size - 1)) {
		dm_block_t cache_size = ca->cache_sectors;

		cache->sectors_per_block_shift = -1;
		cache_size = block_div(cache_size, ca->block_size);
		set_cache_size(cache, to_cblock(cache_size));
	} else {
		cache->sectors_per_block_shift = __ffs(ca->block_size);
		set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
	}

	r = create_cache_policy(cache, ca, error);
	if (r)
		goto bad;

	cache->policy_nr_args = ca->policy_argc;
	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;

	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
	if (r) {
		*error = "Error setting cache policy's config values";
		goto bad;
	}

	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
				     ca->block_size, may_format,
				     dm_cache_policy_get_hint_size(cache->policy));
	if (IS_ERR(cmd)) {
		*error = "Error creating metadata object";
		r = PTR_ERR(cmd);
		goto bad;
	}
	cache->cmd = cmd;

	if (passthrough_mode(&cache->features)) {
		bool all_clean;

		r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
		if (r) {
			*error = "dm_cache_metadata_all_clean() failed";
			goto bad;
		}

		if (!all_clean) {
			*error = "Cannot enter passthrough mode unless all blocks are clean";
			r = -EINVAL;
			goto bad;
		}
	}

	spin_lock_init(&cache->lock);
	bio_list_init(&cache->deferred_bios);
	bio_list_init(&cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	INIT_LIST_HEAD(&cache->quiesced_migrations);
	INIT_LIST_HEAD(&cache->completed_migrations);
	INIT_LIST_HEAD(&cache->need_commit_migrations);
	atomic_set(&cache->nr_allocated_migrations, 0);
	atomic_set(&cache->nr_io_migrations, 0);
	init_waitqueue_head(&cache->migration_wait);

	init_waitqueue_head(&cache->quiescing_wait);
	atomic_set(&cache->quiescing, 0);
	atomic_set(&cache->quiescing_ack, 0);

	r = -ENOMEM;
	atomic_set(&cache->nr_dirty, 0);
	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
	if (!cache->dirty_bitset) {
		*error = "could not allocate dirty bitset";
		goto bad;
	}
	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));

	cache->discard_block_size =
		calculate_discard_block_size(cache->sectors_per_block,
					     cache->origin_sectors);
	cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
							      cache->discard_block_size));
	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
	if (!cache->discard_bitset) {
		*error = "could not allocate discard bitset";
		goto bad;
	}
	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(cache->copier)) {
		*error = "could not create kcopyd client";
		r = PTR_ERR(cache->copier);
		goto bad;
	}

	cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!cache->wq) {
		*error = "could not create workqueue for metadata object";
		goto bad;
	}
	INIT_WORK(&cache->worker, do_worker);
	INIT_DELAYED_WORK(&cache->waker, do_waker);
	cache->last_commit_jiffies = jiffies;

	cache->prison = dm_bio_prison_create();
	if (!cache->prison) {
		*error = "could not create bio prison";
		goto bad;
	}

	cache->all_io_ds = dm_deferred_set_create();
	if (!cache->all_io_ds) {
		*error = "could not create all_io deferred set";
		goto bad;
	}

	cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
							 migration_cache);
	if (!cache->migration_pool) {
		*error = "Error creating cache's migration mempool";
		goto bad;
	}

	cache->need_tick_bio = true;
	cache->sized = false;
	cache->invalidate = false;
	cache->commit_requested = false;
	cache->loaded_mappings = false;
	cache->loaded_discards = false;

	load_stats(cache);

	atomic_set(&cache->stats.demotion, 0);
	atomic_set(&cache->stats.promotion, 0);
	atomic_set(&cache->stats.copies_avoided, 0);
	atomic_set(&cache->stats.cache_cell_clash, 0);
	atomic_set(&cache->stats.commit_count, 0);
	atomic_set(&cache->stats.discard_count, 0);

	spin_lock_init(&cache->invalidation_lock);
	INIT_LIST_HEAD(&cache->invalidation_requests);

	*result = cache;
	return 0;

bad:
	destroy(cache);
	return r;
}
static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
	unsigned i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);
		if (!copy[i]) {
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			return -ENOMEM;
		}
	}

	cache->nr_ctr_args = argc;
	cache->ctr_args = copy;

	return 0;
}
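
/*
 * The constructor: parse the table arguments, build the cache, then keep
 * a copy of the ctr arguments (minus the three device paths) so the
 * table line can be reported by cache_status().
 */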
static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct cache_args *ca;
	struct cache *cache = NULL;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca) {
		ti->error = "Error allocating memory for cache";
		return -ENOMEM;
	}
	ca->ti = ti;

	r = parse_cache_args(ca, argc, argv, &ti->error);
	if (r)
		goto out;

	r = cache_create(ca, &cache);
	if (r)
		goto out;

	r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
	if (r) {
		destroy(cache);
		goto out;
	}

	ti->private = cache;

out:
	destroy_cache_args(ca);
	return r;
}
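
/*
 * The fast mapping path.  Anything that can't be remapped immediately
 * (flushes, discards, cell contention, policy decisions that need a
 * migration) is deferred to the worker thread.
 */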
static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell)
{
	int r;
	dm_oblock_t block = get_bio_block(cache, bio);
	size_t pb_data_size = get_per_bio_data_size(cache);
	bool can_migrate = false;
	bool discarded_block;
	struct policy_result lookup_result;
	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);

	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
		/*
		 * This can only occur if the io goes to a partial block at
		 * the end of the origin device.  We don't cache these.
		 * Just remap to the origin and carry on.
		 */
		remap_to_origin(cache, bio);
		return DM_MAPIO_REMAPPED;
	}

	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * Check to see if that block is currently migrating.
	 */
	*cell = alloc_prison_cell(cache);
	if (!*cell) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = bio_detain(cache, block, bio, *cell,
		       (cell_free_fn) free_prison_cell,
		       cache, cell);
	if (r) {
		if (r < 0)
			defer_bio(cache, bio);

		return DM_MAPIO_SUBMITTED;
	}

	discarded_block = is_discarded_oblock(cache, block);

	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
		       bio, &lookup_result);
	if (r == -EWOULDBLOCK) {
		cell_defer(cache, *cell, true);
		return DM_MAPIO_SUBMITTED;

	} else if (r) {
		DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
		cell_defer(cache, *cell, false);
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = DM_MAPIO_REMAPPED;
	switch (lookup_result.op) {
	case POLICY_HIT:
		if (passthrough_mode(&cache->features)) {
			if (bio_data_dir(bio) == WRITE) {
				/*
				 * We need to invalidate this block, so
				 * defer for the worker thread.
				 */
				cell_defer(cache, *cell, true);
				r = DM_MAPIO_SUBMITTED;

			} else {
				inc_miss_counter(cache, bio);
				remap_to_origin_clear_discard(cache, bio, block);
			}

		} else {
			inc_hit_counter(cache, bio);
			if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
			    !is_dirty(cache, lookup_result.cblock))
				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
			else
				remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
		}
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		if (pb->req_nr != 0) {
			/*
			 * This is a duplicate writethrough io that is no
			 * longer needed because the block has been demoted.
			 */
			bio_endio(bio, 0);
			cell_defer(cache, *cell, false);
			r = DM_MAPIO_SUBMITTED;

		} else
			remap_to_origin_clear_discard(cache, bio, block);
		break;

	default:
		DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
			    (unsigned) lookup_result.op);
		cell_defer(cache, *cell, false);
		bio_io_error(bio);
		r = DM_MAPIO_SUBMITTED;
	}

	return r;
}
static int cache_map(struct dm_target *ti, struct bio *bio)
{
	int r;
	struct dm_bio_prison_cell *cell = NULL;
	struct cache *cache = ti->private;

	r = __cache_map(cache, bio, &cell);
	if (r == DM_MAPIO_REMAPPED && cell) {
		inc_ds(cache, bio, cell);
		cell_defer(cache, cell, false);
	}

	return r;
}
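
/*
 * On completion: propagate the policy tick noted at map time, and check
 * whether this bio was the last holder of a deferred set entry that
 * quiesced migrations are waiting on.
 */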
static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct cache *cache = ti->private;
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	if (pb->tick) {
		policy_tick(cache->policy);

		spin_lock_irqsave(&cache->lock, flags);
		cache->need_tick_bio = true;
		spin_unlock_irqrestore(&cache->lock, flags);
	}

	check_for_quiesced_migrations(cache, pb);

	return 0;
}

static int write_dirty_bitset(struct cache *cache)
{
	unsigned i, r;

	for (i = 0; i < from_cblock(cache->cache_size); i++) {
		r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
				       is_dirty(cache, to_cblock(i)));
		if (r)
			return r;
	}

	return 0;
}

static int write_discard_bitset(struct cache *cache)
{
	unsigned i, r;

	r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
					   cache->discard_nr_blocks);
	if (r) {
		DMERR("could not resize on-disk discard bitset");
		return r;
	}

	for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
		r = dm_cache_set_discard(cache->cmd, to_dblock(i),
					 is_discarded(cache, to_dblock(i)));
		if (r)
			return r;
	}

	return 0;
}

/*
 * returns true on success
 */
static bool sync_metadata(struct cache *cache)
{
	int r1, r2, r3, r4;

	r1 = write_dirty_bitset(cache);
	if (r1)
		DMERR("could not write dirty bitset");

	r2 = write_discard_bitset(cache);
	if (r2)
		DMERR("could not write discard bitset");

	save_stats(cache);

	r3 = dm_cache_write_hints(cache->cmd, cache->policy);
	if (r3)
		DMERR("could not write hints");

	/*
	 * If writing the above metadata failed, we still commit, but don't
	 * set the clean shutdown flag.  This will effectively force every
	 * dirty bit to be set on reload.
	 */
	r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
	if (r4)
		DMERR("could not write cache metadata.  Data loss may occur.");

	return !r1 && !r2 && !r3 && !r4;
}
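
/*
 * Postsuspend: quiesce the worker, wait for in-flight migrations to
 * finish, requeue anything still deferred, then write the metadata out.
 */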
static void cache_postsuspend(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	start_quiescing(cache);
	wait_for_migrations(cache);
	stop_worker(cache);
	requeue_deferred_io(cache);
	stop_quiescing(cache);

	(void) sync_metadata(cache);
}

static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
			bool dirty, uint32_t hint, bool hint_valid)
{
	int r;
	struct cache *cache = context;

	r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
	if (r)
		return r;

	if (dirty)
		set_dirty(cache, oblock, cblock);
	else
		clear_dirty(cache, oblock, cblock);

	return 0;
}
/*
 * The discard block size in the on disk metadata is not
 * necessarily the same as we're currently using.  So we have to
 * be careful to only set the discarded attribute if we know it
 * covers a complete block of the new size.
 */
struct discard_load_info {
	struct cache *cache;

	/*
	 * These blocks are sized using the on disk dblock size, rather
	 * than the current one.
	 */
	dm_block_t block_size;
	dm_block_t discard_begin, discard_end;
};

static void discard_load_info_init(struct cache *cache,
				   struct discard_load_info *li)
{
	li->cache = cache;
	li->discard_begin = li->discard_end = 0;
}

static void set_discard_range(struct discard_load_info *li)
{
	sector_t b, e;

	if (li->discard_begin == li->discard_end)
		return;

	/*
	 * Convert to sectors.
	 */
	b = li->discard_begin * li->block_size;
	e = li->discard_end * li->block_size;

	/*
	 * Then convert back to the current dblock size.
	 */
	b = dm_sector_div_up(b, li->cache->discard_block_size);
	sector_div(e, li->cache->discard_block_size);

	/*
	 * The origin may have shrunk, so we need to check we're still in
	 * bounds.
	 */
	if (e > from_dblock(li->cache->discard_nr_blocks))
		e = from_dblock(li->cache->discard_nr_blocks);

	for (; b < e; b++)
		set_discard(li->cache, to_dblock(b));
}

static int load_discard(void *context, sector_t discard_block_size,
			dm_dblock_t dblock, bool discard)
{
	struct discard_load_info *li = context;

	li->block_size = discard_block_size;

	if (discard) {
		if (from_dblock(dblock) == li->discard_end)
			/*
			 * We're already in a discard range, just extend it.
			 */
			li->discard_end = li->discard_end + 1ULL;
		else {
			/*
			 * Emit the old range and start a new one.
			 */
			set_discard_range(li);
			li->discard_begin = from_dblock(dblock);
			li->discard_end = li->discard_begin + 1ULL;
		}
	} else {
		set_discard_range(li);
		li->discard_begin = li->discard_end = 0;
	}

	return 0;
}

static dm_cblock_t get_cache_dev_size(struct cache *cache)
{
	sector_t size = get_dev_size(cache->cache_dev);
	(void) sector_div(size, cache->sectors_per_block);
	return to_cblock(size);
}

static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
	if (from_cblock(new_size) > from_cblock(cache->cache_size))
		return true;

	/*
	 * We can't drop a dirty block when shrinking the cache.
	 */
	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
		new_size = to_cblock(from_cblock(new_size) + 1);
		if (is_dirty(cache, new_size)) {
			DMERR("unable to shrink cache; cache block %llu is dirty",
			      (unsigned long long) from_cblock(new_size));
			return false;
		}
	}

	return true;
}

static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{
	int r;

	r = dm_cache_resize(cache->cmd, new_size);
	if (r) {
		DMERR("could not resize cache metadata");
		return r;
	}

	set_cache_size(cache, new_size);

	return 0;
}

static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	dm_cblock_t csize = get_cache_dev_size(cache);

	/*
	 * Check to see if the cache has resized.
	 */
	if (!cache->sized) {
		r = resize_cache_dev(cache, csize);
		if (r)
			return r;

		cache->sized = true;

	} else if (csize != cache->cache_size) {
		if (!can_resize(cache, csize))
			return -EINVAL;

		r = resize_cache_dev(cache, csize);
		if (r)
			return r;
	}

	if (!cache->loaded_mappings) {
		r = dm_cache_load_mappings(cache->cmd, cache->policy,
					   load_mapping, cache);
		if (r) {
			DMERR("could not load cache mappings");
			return r;
		}

		cache->loaded_mappings = true;
	}

	if (!cache->loaded_discards) {
		struct discard_load_info li;

		/*
		 * The discard bitset could have been resized, or the
		 * discard block size changed.  To be safe we start by
		 * setting every dblock to not discarded.
		 */
		clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

		discard_load_info_init(cache, &li);
		r = dm_cache_load_discards(cache->cmd, load_discard, &li);
		if (r) {
			DMERR("could not load origin discards");
			return r;
		}
		set_discard_range(&li);

		cache->loaded_discards = true;
	}

	return r;
}
static void cache_resume(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	cache->need_tick_bio = true;
	do_waker(&cache->waker.work);
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <cache block size> <#used cache blocks>/<#total cache blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <policy name> <#policy args> <policy args>*
 */
static void cache_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	int r = 0;
	unsigned i;
	ssize_t sz = 0;
	dm_block_t nr_free_blocks_metadata = 0;
	dm_block_t nr_blocks_metadata = 0;
	char buf[BDEVNAME_SIZE];
	struct cache *cache = ti->private;
	dm_cblock_t residency;

	switch (type) {
	case STATUSTYPE_INFO:
		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
			r = dm_cache_commit(cache->cmd, false);
			if (r)
				DMERR("could not commit metadata for accurate status");
		}

		r = dm_cache_get_free_metadata_block_count(cache->cmd,
							   &nr_free_blocks_metadata);
		if (r) {
			DMERR("could not get metadata free block count");
			goto err;
		}

		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
		if (r) {
			DMERR("could not get metadata device size");
			goto err;
		}

		residency = policy_residency(cache->policy);

		DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
		       (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       cache->sectors_per_block,
		       (unsigned long long) from_cblock(residency),
		       (unsigned long long) from_cblock(cache->cache_size),
		       (unsigned) atomic_read(&cache->stats.read_hit),
		       (unsigned) atomic_read(&cache->stats.read_miss),
		       (unsigned) atomic_read(&cache->stats.write_hit),
		       (unsigned) atomic_read(&cache->stats.write_miss),
		       (unsigned) atomic_read(&cache->stats.demotion),
		       (unsigned) atomic_read(&cache->stats.promotion),
		       (unsigned long) atomic_read(&cache->nr_dirty));

		if (writethrough_mode(&cache->features))
			DMEMIT("1 writethrough ");

		else if (passthrough_mode(&cache->features))
			DMEMIT("1 passthrough ");

		else if (writeback_mode(&cache->features))
			DMEMIT("1 writeback ");

		else {
			DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode);
			goto err;
		}

		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);

		DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
		if (sz < maxlen) {
			r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
			if (r)
				DMERR("policy_emit_config_values returned %d", r);
		}

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < cache->nr_ctr_args - 1; i++)
			DMEMIT(" %s", cache->ctr_args[i]);
		if (cache->nr_ctr_args)
			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
	}

	return;

err:
	DMEMIT("Error");
}
/*
 * A cache block range can take two forms:
 *
 * i) A single cblock, eg. '3456'
 * ii) A begin and end cblock with a dash between, eg. 123-234
 */
static int parse_cblock_range(struct cache *cache, const char *str,
			      struct cblock_range *result)
{
	char dummy;
	uint64_t b, e;
	int r;

	/*
	 * Try and parse form (ii) first.
	 */
	r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
	if (r < 0)
		return r;

	if (r == 2) {
		result->begin = to_cblock(b);
		result->end = to_cblock(e);
		return 0;
	}

	/*
	 * That didn't work, try form (i).
	 */
	r = sscanf(str, "%llu%c", &b, &dummy);
	if (r < 0)
		return r;

	if (r == 1) {
		result->begin = to_cblock(b);
		result->end = to_cblock(from_cblock(result->begin) + 1u);
		return 0;
	}

	DMERR("invalid cblock range '%s'", str);
	return -EINVAL;
}

static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
{
	uint64_t b = from_cblock(range->begin);
	uint64_t e = from_cblock(range->end);
	uint64_t n = from_cblock(cache->cache_size);

	if (b >= n) {
		DMERR("begin cblock out of range: %llu >= %llu", b, n);
		return -EINVAL;
	}

	if (e > n) {
		DMERR("end cblock out of range: %llu > %llu", e, n);
		return -EINVAL;
	}

	if (b >= e) {
		DMERR("invalid cblock range: %llu >= %llu", b, e);
		return -EINVAL;
	}

	return 0;
}
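
/*
 * Invalidation is synchronous from the caller's point of view: queue the
 * request for the worker thread and sleep until it reports completion.
 */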
static int request_invalidation(struct cache *cache, struct cblock_range *range)
{
	struct invalidation_request req;

	INIT_LIST_HEAD(&req.list);
	req.cblocks = range;
	atomic_set(&req.complete, 0);
	req.err = 0;
	init_waitqueue_head(&req.result_wait);

	spin_lock(&cache->invalidation_lock);
	list_add(&req.list, &cache->invalidation_requests);
	spin_unlock(&cache->invalidation_lock);
	wake_worker(cache);

	wait_event(req.result_wait, atomic_read(&req.complete));
	return req.err;
}

static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
					      const char **cblock_ranges)
{
	int r = 0;
	unsigned i;
	struct cblock_range range;

	if (!passthrough_mode(&cache->features)) {
		DMERR("cache has to be in passthrough mode for invalidation");
		return -EPERM;
	}

	for (i = 0; i < count; i++) {
		r = parse_cblock_range(cache, cblock_ranges[i], &range);
		if (r)
			break;

		r = validate_cblock_range(cache, &range);
		if (r)
			break;

		/*
		 * Pass begin and end origin blocks to the worker and wake it.
		 */
		r = request_invalidation(cache, &range);
		if (r)
			break;
	}

	return r;
}
/*
 * Supports
 *	"<key> <value>"
 * and
 *	"invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
 *
 * The key migration_threshold is supported by the cache target core.
 */
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct cache *cache = ti->private;

	if (!argc)
		return -EINVAL;

	if (!strcasecmp(argv[0], "invalidate_cblocks"))
		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);

	if (argc != 2)
		return -EINVAL;

	return set_config_value(cache, argv[0], argv[1]);
}

static int cache_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int r = 0;
	struct cache *cache = ti->private;

	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
	if (!r)
		r = fn(ti, cache->origin_dev, 0, ti->len, data);

	return r;
}

/*
 * We assume I/O is going to the origin (which is the volume
 * more likely to have restrictions e.g. by being striped).
 * (Looking up the exact location of the data would be expensive
 * and could always be out of date by the time the bio is submitted.)
 */
static int cache_bvec_merge(struct dm_target *ti,
			    struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size)
{
	struct cache *cache = ti->private;
	struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cache->origin_dev->bdev;
	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
	/*
	 * FIXME: these limits may be incompatible with the cache device
	 */
	limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
					    cache->origin_sectors);
	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
}

static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct cache *cache = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with the
	 * cache's blocksize (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < cache->sectors_per_block ||
	    do_div(io_opt_sectors, cache->sectors_per_block)) {
		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
	}
	set_discard_limits(cache, limits);
}

/*----------------------------------------------------------------*/

static struct target_type cache_target = {
	.name = "cache",
	.version = {1, 6, 0},
	.module = THIS_MODULE,
	.ctr = cache_ctr,
	.dtr = cache_dtr,
	.map = cache_map,
	.end_io = cache_end_io,
	.postsuspend = cache_postsuspend,
	.preresume = cache_preresume,
	.resume = cache_resume,
	.status = cache_status,
	.message = cache_message,
	.iterate_devices = cache_iterate_devices,
	.merge = cache_bvec_merge,
	.io_hints = cache_io_hints,
};

static int __init dm_cache_init(void)
{
	int r;

	r = dm_register_target(&cache_target);
	if (r) {
		DMERR("cache target registration failed: %d", r);
		return r;
	}

	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
	if (!migration_cache) {
		dm_unregister_target(&cache_target);
		return -ENOMEM;
	}

	return 0;
}

static void __exit dm_cache_exit(void)
{
	dm_unregister_target(&cache_target);
	kmem_cache_destroy(migration_cache);
}

module_init(dm_cache_init);
module_exit(dm_cache_exit);

MODULE_DESCRIPTION(DM_NAME " cache target");
MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
MODULE_LICENSE("GPL");