dm-cache-target.c
  1. /*
  2. * Copyright (C) 2012 Red Hat. All rights reserved.
  3. *
  4. * This file is released under the GPL.
  5. */
  6. #include "dm.h"
  7. #include "dm-bio-prison.h"
  8. #include "dm-bio-record.h"
  9. #include "dm-cache-metadata.h"
  10. #include <linux/dm-io.h>
  11. #include <linux/dm-kcopyd.h>
  12. #include <linux/init.h>
  13. #include <linux/mempool.h>
  14. #include <linux/module.h>
  15. #include <linux/slab.h>
  16. #include <linux/vmalloc.h>
  17. #define DM_MSG_PREFIX "cache"
  18. DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
  19. "A percentage of time allocated for copying to and/or from cache");
  20. /*----------------------------------------------------------------*/
  21. /*
  22. * Glossary:
  23. *
  24. * oblock: index of an origin block
  25. * cblock: index of a cache block
  26. * promotion: movement of a block from origin to cache
  27. * demotion: movement of a block from cache to origin
  28. * migration: movement of a block between the origin and cache device,
  29. * either direction
  30. */
  31. /*----------------------------------------------------------------*/
  32. static size_t bitset_size_in_bytes(unsigned nr_entries)
  33. {
  34. return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
  35. }
  36. static unsigned long *alloc_bitset(unsigned nr_entries)
  37. {
  38. size_t s = bitset_size_in_bytes(nr_entries);
  39. return vzalloc(s);
  40. }
  41. static void clear_bitset(void *bitset, unsigned nr_entries)
  42. {
  43. size_t s = bitset_size_in_bytes(nr_entries);
  44. memset(bitset, 0, s);
  45. }
  46. static void free_bitset(unsigned long *bits)
  47. {
  48. vfree(bits);
  49. }
  50. /*----------------------------------------------------------------*/
  51. /*
  52. * There are a couple of places where we let a bio run, but want to do some
  53. * work before calling its endio function. We do this by temporarily
  54. * changing the endio fn.
  55. */
  56. struct dm_hook_info {
  57. bio_end_io_t *bi_end_io;
  58. void *bi_private;
  59. };
  60. static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
  61. bio_end_io_t *bi_end_io, void *bi_private)
  62. {
  63. h->bi_end_io = bio->bi_end_io;
  64. h->bi_private = bio->bi_private;
  65. bio->bi_end_io = bi_end_io;
  66. bio->bi_private = bi_private;
  67. }
  68. static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
  69. {
  70. bio->bi_end_io = h->bi_end_io;
  71. bio->bi_private = h->bi_private;
  72. }
  73. /*----------------------------------------------------------------*/
  74. #define PRISON_CELLS 1024
  75. #define MIGRATION_POOL_SIZE 128
  76. #define COMMIT_PERIOD HZ
  77. #define MIGRATION_COUNT_WINDOW 10
  78. /*
  79. * The block size of the device holding cache data must be
  80. * between 32KB and 1GB.
  81. */
  82. #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
  83. #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
  84. /*
  85. * FIXME: the cache is read/write for the time being.
  86. */
  87. enum cache_metadata_mode {
  88. CM_WRITE, /* metadata may be changed */
  89. CM_READ_ONLY, /* metadata may not be changed */
  90. };
  91. enum cache_io_mode {
  92. /*
  93. * Data is written to cached blocks only. These blocks are marked
  94. * dirty. If you lose the cache device you will lose data.
  95. * Potential performance increase for both reads and writes.
  96. */
  97. CM_IO_WRITEBACK,
  98. /*
  99. * Data is written to both cache and origin. Blocks are never
  100. * dirty. Potential performance benefit for reads only.
  101. */
  102. CM_IO_WRITETHROUGH,
  103. /*
  104. * A degraded mode useful for various cache coherency situations
  105. * (eg, rolling back snapshots). Reads and writes always go to the
  106. * origin. If a write goes to a cached oblock, then the cache
  107. * block is invalidated.
  108. */
  109. CM_IO_PASSTHROUGH
  110. };
  111. struct cache_features {
  112. enum cache_metadata_mode mode;
  113. enum cache_io_mode io_mode;
  114. };
  115. struct cache_stats {
  116. atomic_t read_hit;
  117. atomic_t read_miss;
  118. atomic_t write_hit;
  119. atomic_t write_miss;
  120. atomic_t demotion;
  121. atomic_t promotion;
  122. atomic_t copies_avoided;
  123. atomic_t cache_cell_clash;
  124. atomic_t commit_count;
  125. atomic_t discard_count;
  126. };
  127. /*
  128. * Defines a range of cblocks: begin to (end - 1) are in the range; end is
  129. * the one-past-the-end value.
  130. */
  131. struct cblock_range {
  132. dm_cblock_t begin;
  133. dm_cblock_t end;
  134. };
  135. struct invalidation_request {
  136. struct list_head list;
  137. struct cblock_range *cblocks;
  138. atomic_t complete;
  139. int err;
  140. wait_queue_head_t result_wait;
  141. };
  142. struct cache {
  143. struct dm_target *ti;
  144. struct dm_target_callbacks callbacks;
  145. struct dm_cache_metadata *cmd;
  146. /*
  147. * Metadata is written to this device.
  148. */
  149. struct dm_dev *metadata_dev;
  150. /*
  151. * The slower of the two data devices. Typically a spindle.
  152. */
  153. struct dm_dev *origin_dev;
  154. /*
  155. * The faster of the two data devices. Typically an SSD.
  156. */
  157. struct dm_dev *cache_dev;
  158. /*
  159. * Size of the origin device in _complete_ blocks and native sectors.
  160. */
  161. dm_oblock_t origin_blocks;
  162. sector_t origin_sectors;
  163. /*
  164. * Size of the cache device in blocks.
  165. */
  166. dm_cblock_t cache_size;
  167. /*
  168. * Fields for converting from sectors to blocks.
  169. */
  170. uint32_t sectors_per_block;
  171. int sectors_per_block_shift;
  172. spinlock_t lock;
  173. struct bio_list deferred_bios;
  174. struct bio_list deferred_flush_bios;
  175. struct bio_list deferred_writethrough_bios;
  176. struct list_head quiesced_migrations;
  177. struct list_head completed_migrations;
  178. struct list_head need_commit_migrations;
  179. sector_t migration_threshold;
  180. wait_queue_head_t migration_wait;
  181. atomic_t nr_migrations;
  182. wait_queue_head_t quiescing_wait;
  183. atomic_t quiescing;
  184. atomic_t quiescing_ack;
  185. /*
  186. * cache_size entries, dirty if set
  187. */
  188. dm_cblock_t nr_dirty;
  189. unsigned long *dirty_bitset;
  190. /*
  191. * origin_blocks entries, discarded if set.
  192. */
  193. dm_dblock_t discard_nr_blocks;
  194. unsigned long *discard_bitset;
  195. uint32_t discard_block_size; /* a power of 2 times sectors per block */
  196. /*
  197. * Rather than reconstructing the table line for the status we just
  198. * save it and regurgitate.
  199. */
  200. unsigned nr_ctr_args;
  201. const char **ctr_args;
  202. struct dm_kcopyd_client *copier;
  203. struct workqueue_struct *wq;
  204. struct work_struct worker;
  205. struct delayed_work waker;
  206. unsigned long last_commit_jiffies;
  207. struct dm_bio_prison *prison;
  208. struct dm_deferred_set *all_io_ds;
  209. mempool_t *migration_pool;
  210. struct dm_cache_migration *next_migration;
  211. struct dm_cache_policy *policy;
  212. unsigned policy_nr_args;
  213. bool need_tick_bio:1;
  214. bool sized:1;
  215. bool invalidate:1;
  216. bool commit_requested:1;
  217. bool loaded_mappings:1;
  218. bool loaded_discards:1;
  219. /*
  220. * Cache features such as write-through.
  221. */
  222. struct cache_features features;
  223. struct cache_stats stats;
  224. /*
  225. * Invalidation fields.
  226. */
  227. spinlock_t invalidation_lock;
  228. struct list_head invalidation_requests;
  229. };
  230. struct per_bio_data {
  231. bool tick:1;
  232. unsigned req_nr:2;
  233. struct dm_deferred_entry *all_io_entry;
  234. /*
  235. * writethrough fields. These MUST remain at the end of this
  236. * structure and the 'cache' member must be the first as it
  237. * is used to determine the offset of the writethrough fields.
  238. */
  239. struct cache *cache;
  240. dm_cblock_t cblock;
  241. struct dm_hook_info hook_info;
  242. struct dm_bio_details bio_details;
  243. };
  244. struct dm_cache_migration {
  245. struct list_head list;
  246. struct cache *cache;
  247. unsigned long start_jiffies;
  248. dm_oblock_t old_oblock;
  249. dm_oblock_t new_oblock;
  250. dm_cblock_t cblock;
  251. bool err:1;
  252. bool writeback:1;
  253. bool demote:1;
  254. bool promote:1;
  255. bool requeue_holder:1;
  256. bool invalidate:1;
  257. struct dm_bio_prison_cell *old_ocell;
  258. struct dm_bio_prison_cell *new_ocell;
  259. };
  260. /*
  261. * Processing a bio in the worker thread may require these memory
  262. * allocations. We prealloc to avoid deadlocks (the same worker thread
  263. * frees them back to the mempool).
  264. */
  265. struct prealloc {
  266. struct dm_cache_migration *mg;
  267. struct dm_bio_prison_cell *cell1;
  268. struct dm_bio_prison_cell *cell2;
  269. };
  270. static void wake_worker(struct cache *cache)
  271. {
  272. queue_work(cache->wq, &cache->worker);
  273. }
  274. /*----------------------------------------------------------------*/
  275. static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
  276. {
  277. /* FIXME: change to use a local slab. */
  278. return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
  279. }
  280. static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
  281. {
  282. dm_bio_prison_free_cell(cache->prison, cell);
  283. }
  284. static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
  285. {
  286. if (!p->mg) {
  287. p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
  288. if (!p->mg)
  289. return -ENOMEM;
  290. }
  291. if (!p->cell1) {
  292. p->cell1 = alloc_prison_cell(cache);
  293. if (!p->cell1)
  294. return -ENOMEM;
  295. }
  296. if (!p->cell2) {
  297. p->cell2 = alloc_prison_cell(cache);
  298. if (!p->cell2)
  299. return -ENOMEM;
  300. }
  301. return 0;
  302. }
  303. static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
  304. {
  305. if (p->cell2)
  306. free_prison_cell(cache, p->cell2);
  307. if (p->cell1)
  308. free_prison_cell(cache, p->cell1);
  309. if (p->mg)
  310. mempool_free(p->mg, cache->migration_pool);
  311. }
  312. static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
  313. {
  314. struct dm_cache_migration *mg = p->mg;
  315. BUG_ON(!mg);
  316. p->mg = NULL;
  317. return mg;
  318. }
  319. /*
  320. * You must have a cell within the prealloc struct to return. If not, this
  321. * function will BUG() rather than return NULL.
  322. */
  323. static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
  324. {
  325. struct dm_bio_prison_cell *r = NULL;
  326. if (p->cell1) {
  327. r = p->cell1;
  328. p->cell1 = NULL;
  329. } else if (p->cell2) {
  330. r = p->cell2;
  331. p->cell2 = NULL;
  332. } else
  333. BUG();
  334. return r;
  335. }
  336. /*
  337. * You can't have more than two cells in a prealloc struct. BUG() will be
  338. * called if you try and overfill.
  339. */
  340. static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
  341. {
  342. if (!p->cell2)
  343. p->cell2 = cell;
  344. else if (!p->cell1)
  345. p->cell1 = cell;
  346. else
  347. BUG();
  348. }
  349. /*----------------------------------------------------------------*/
  350. static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
  351. {
  352. key->virtual = 0;
  353. key->dev = 0;
  354. key->block = from_oblock(oblock);
  355. }
  356. /*
  357. * The caller hands in a preallocated cell, and a free function for it.
  358. * The cell will be freed if there's an error, or if it wasn't used because
  359. * a cell with that key already exists.
  360. */
  361. typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
  362. static int bio_detain(struct cache *cache, dm_oblock_t oblock,
  363. struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
  364. cell_free_fn free_fn, void *free_context,
  365. struct dm_bio_prison_cell **cell_result)
  366. {
  367. int r;
  368. struct dm_cell_key key;
  369. build_key(oblock, &key);
  370. r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
  371. if (r)
  372. free_fn(free_context, cell_prealloc);
  373. return r;
  374. }
  375. static int get_cell(struct cache *cache,
  376. dm_oblock_t oblock,
  377. struct prealloc *structs,
  378. struct dm_bio_prison_cell **cell_result)
  379. {
  380. int r;
  381. struct dm_cell_key key;
  382. struct dm_bio_prison_cell *cell_prealloc;
  383. cell_prealloc = prealloc_get_cell(structs);
  384. build_key(oblock, &key);
  385. r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
  386. if (r)
  387. prealloc_put_cell(structs, cell_prealloc);
  388. return r;
  389. }
  390. /*----------------------------------------------------------------*/
  391. static bool is_dirty(struct cache *cache, dm_cblock_t b)
  392. {
  393. return test_bit(from_cblock(b), cache->dirty_bitset);
  394. }
  395. static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
  396. {
  397. if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
  398. cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
  399. policy_set_dirty(cache->policy, oblock);
  400. }
  401. }
  402. static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
  403. {
  404. if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
  405. policy_clear_dirty(cache->policy, oblock);
  406. cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
  407. if (!from_cblock(cache->nr_dirty))
  408. dm_table_event(cache->ti->table);
  409. }
  410. }
  411. /*----------------------------------------------------------------*/
  412. static bool block_size_is_power_of_two(struct cache *cache)
  413. {
  414. return cache->sectors_per_block_shift >= 0;
  415. }
  416. /* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
  417. #if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
  418. __always_inline
  419. #endif
  420. static dm_block_t block_div(dm_block_t b, uint32_t n)
  421. {
  422. do_div(b, n);
  423. return b;
  424. }
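/*
 * Convert an origin block to the discard block that covers it. The
 * discard block size is held in sectors, so we first work out how many
 * cache-block-sized blocks make up one discard block, then divide.
 */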
  425. static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
  426. {
  427. uint32_t discard_blocks = cache->discard_block_size;
  428. dm_block_t b = from_oblock(oblock);
  429. if (!block_size_is_power_of_two(cache))
  430. discard_blocks = discard_blocks / cache->sectors_per_block;
  431. else
  432. discard_blocks >>= cache->sectors_per_block_shift;
  433. b = block_div(b, discard_blocks);
  434. return to_dblock(b);
  435. }
  436. static void set_discard(struct cache *cache, dm_dblock_t b)
  437. {
  438. unsigned long flags;
  439. atomic_inc(&cache->stats.discard_count);
  440. spin_lock_irqsave(&cache->lock, flags);
  441. set_bit(from_dblock(b), cache->discard_bitset);
  442. spin_unlock_irqrestore(&cache->lock, flags);
  443. }
  444. static void clear_discard(struct cache *cache, dm_dblock_t b)
  445. {
  446. unsigned long flags;
  447. spin_lock_irqsave(&cache->lock, flags);
  448. clear_bit(from_dblock(b), cache->discard_bitset);
  449. spin_unlock_irqrestore(&cache->lock, flags);
  450. }
  451. static bool is_discarded(struct cache *cache, dm_dblock_t b)
  452. {
  453. int r;
  454. unsigned long flags;
  455. spin_lock_irqsave(&cache->lock, flags);
  456. r = test_bit(from_dblock(b), cache->discard_bitset);
  457. spin_unlock_irqrestore(&cache->lock, flags);
  458. return r;
  459. }
  460. static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
  461. {
  462. int r;
  463. unsigned long flags;
  464. spin_lock_irqsave(&cache->lock, flags);
  465. r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
  466. cache->discard_bitset);
  467. spin_unlock_irqrestore(&cache->lock, flags);
  468. return r;
  469. }
  470. /*----------------------------------------------------------------*/
  471. static void load_stats(struct cache *cache)
  472. {
  473. struct dm_cache_statistics stats;
  474. dm_cache_metadata_get_stats(cache->cmd, &stats);
  475. atomic_set(&cache->stats.read_hit, stats.read_hits);
  476. atomic_set(&cache->stats.read_miss, stats.read_misses);
  477. atomic_set(&cache->stats.write_hit, stats.write_hits);
  478. atomic_set(&cache->stats.write_miss, stats.write_misses);
  479. }
  480. static void save_stats(struct cache *cache)
  481. {
  482. struct dm_cache_statistics stats;
  483. stats.read_hits = atomic_read(&cache->stats.read_hit);
  484. stats.read_misses = atomic_read(&cache->stats.read_miss);
  485. stats.write_hits = atomic_read(&cache->stats.write_hit);
  486. stats.write_misses = atomic_read(&cache->stats.write_miss);
  487. dm_cache_metadata_set_stats(cache->cmd, &stats);
  488. }
  489. /*----------------------------------------------------------------
  490. * Per bio data
  491. *--------------------------------------------------------------*/
  492. /*
  493. * If using writeback, leave out struct per_bio_data's writethrough fields.
  494. */
  495. #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
  496. #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
  497. static bool writethrough_mode(struct cache_features *f)
  498. {
  499. return f->io_mode == CM_IO_WRITETHROUGH;
  500. }
  501. static bool writeback_mode(struct cache_features *f)
  502. {
  503. return f->io_mode == CM_IO_WRITEBACK;
  504. }
  505. static bool passthrough_mode(struct cache_features *f)
  506. {
  507. return f->io_mode == CM_IO_PASSTHROUGH;
  508. }
  509. static size_t get_per_bio_data_size(struct cache *cache)
  510. {
  511. return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
  512. }
  513. static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
  514. {
  515. struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
  516. BUG_ON(!pb);
  517. return pb;
  518. }
  519. static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
  520. {
  521. struct per_bio_data *pb = get_per_bio_data(bio, data_size);
  522. pb->tick = false;
  523. pb->req_nr = dm_bio_get_target_bio_nr(bio);
  524. pb->all_io_entry = NULL;
  525. return pb;
  526. }
  527. /*----------------------------------------------------------------
  528. * Remapping
  529. *--------------------------------------------------------------*/
  530. static void remap_to_origin(struct cache *cache, struct bio *bio)
  531. {
  532. bio->bi_bdev = cache->origin_dev->bdev;
  533. }
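/*
 * Remap a bio to the cache device, preserving its offset within the
 * block: a shift and mask for power-of-two block sizes, otherwise a
 * sector_div().
 */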
  534. static void remap_to_cache(struct cache *cache, struct bio *bio,
  535. dm_cblock_t cblock)
  536. {
  537. sector_t bi_sector = bio->bi_iter.bi_sector;
  538. bio->bi_bdev = cache->cache_dev->bdev;
  539. if (!block_size_is_power_of_two(cache))
  540. bio->bi_iter.bi_sector =
  541. (from_cblock(cblock) * cache->sectors_per_block) +
  542. sector_div(bi_sector, cache->sectors_per_block);
  543. else
  544. bio->bi_iter.bi_sector =
  545. (from_cblock(cblock) << cache->sectors_per_block_shift) |
  546. (bi_sector & (cache->sectors_per_block - 1));
  547. }
  548. static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
  549. {
  550. unsigned long flags;
  551. size_t pb_data_size = get_per_bio_data_size(cache);
  552. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  553. spin_lock_irqsave(&cache->lock, flags);
  554. if (cache->need_tick_bio &&
  555. !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
  556. pb->tick = true;
  557. cache->need_tick_bio = false;
  558. }
  559. spin_unlock_irqrestore(&cache->lock, flags);
  560. }
  561. static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
  562. dm_oblock_t oblock)
  563. {
  564. check_if_tick_bio_needed(cache, bio);
  565. remap_to_origin(cache, bio);
  566. if (bio_data_dir(bio) == WRITE)
  567. clear_discard(cache, oblock_to_dblock(cache, oblock));
  568. }
  569. static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
  570. dm_oblock_t oblock, dm_cblock_t cblock)
  571. {
  572. check_if_tick_bio_needed(cache, bio);
  573. remap_to_cache(cache, bio, cblock);
  574. if (bio_data_dir(bio) == WRITE) {
  575. set_dirty(cache, oblock, cblock);
  576. clear_discard(cache, oblock_to_dblock(cache, oblock));
  577. }
  578. }
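/*
 * Work out which origin block a bio starts in by dividing its start
 * sector by the block size.
 */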
  579. static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
  580. {
  581. sector_t block_nr = bio->bi_iter.bi_sector;
  582. if (!block_size_is_power_of_two(cache))
  583. (void) sector_div(block_nr, cache->sectors_per_block);
  584. else
  585. block_nr >>= cache->sectors_per_block_shift;
  586. return to_oblock(block_nr);
  587. }
  588. static int bio_triggers_commit(struct cache *cache, struct bio *bio)
  589. {
  590. return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
  591. }
  592. static void issue(struct cache *cache, struct bio *bio)
  593. {
  594. unsigned long flags;
  595. if (!bio_triggers_commit(cache, bio)) {
  596. generic_make_request(bio);
  597. return;
  598. }
  599. /*
  600. * Batch together any bios that trigger commits and then issue a
  601. * single commit for them in do_worker().
  602. */
  603. spin_lock_irqsave(&cache->lock, flags);
  604. cache->commit_requested = true;
  605. bio_list_add(&cache->deferred_flush_bios, bio);
  606. spin_unlock_irqrestore(&cache->lock, flags);
  607. }
  608. static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
  609. {
  610. unsigned long flags;
  611. spin_lock_irqsave(&cache->lock, flags);
  612. bio_list_add(&cache->deferred_writethrough_bios, bio);
  613. spin_unlock_irqrestore(&cache->lock, flags);
  614. wake_worker(cache);
  615. }
  616. static void writethrough_endio(struct bio *bio, int err)
  617. {
  618. struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
  619. dm_unhook_bio(&pb->hook_info, bio);
  620. /*
  621. * Must bump bi_remaining to allow bio to complete with
  622. * restored bi_end_io.
  623. */
  624. atomic_inc(&bio->bi_remaining);
  625. if (err) {
  626. bio_endio(bio, err);
  627. return;
  628. }
  629. dm_bio_restore(&pb->bio_details, bio);
  630. remap_to_cache(pb->cache, bio, pb->cblock);
  631. /*
  632. * We can't issue this bio directly, since we're in interrupt
  633. * context. So it gets put on a bio list for processing by the
  634. * worker thread.
  635. */
  636. defer_writethrough_bio(pb->cache, bio);
  637. }
  638. /*
  639. * When running in writethrough mode we need to send writes to clean blocks
  640. * to both the cache and origin devices. In future we'd like to clone the
  641. * bio and send them in parallel, but for now we're doing them in
  642. * series as this is easier.
  643. */
  644. static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
  645. dm_oblock_t oblock, dm_cblock_t cblock)
  646. {
  647. struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
  648. pb->cache = cache;
  649. pb->cblock = cblock;
  650. dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
  651. dm_bio_record(&pb->bio_details, bio);
  652. remap_to_origin_clear_discard(pb->cache, bio, oblock);
  653. }
  654. /*----------------------------------------------------------------
  655. * Migration processing
  656. *
  657. * Migration covers moving data from the origin device to the cache, or
  658. * vice versa.
  659. *--------------------------------------------------------------*/
  660. static void free_migration(struct dm_cache_migration *mg)
  661. {
  662. mempool_free(mg, mg->cache->migration_pool);
  663. }
  664. static void inc_nr_migrations(struct cache *cache)
  665. {
  666. atomic_inc(&cache->nr_migrations);
  667. }
  668. static void dec_nr_migrations(struct cache *cache)
  669. {
  670. atomic_dec(&cache->nr_migrations);
  671. /*
  672. * Wake the worker in case we're suspending the target.
  673. */
  674. wake_up(&cache->migration_wait);
  675. }
  676. static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
  677. bool holder)
  678. {
  679. (holder ? dm_cell_release : dm_cell_release_no_holder)
  680. (cache->prison, cell, &cache->deferred_bios);
  681. free_prison_cell(cache, cell);
  682. }
  683. static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
  684. bool holder)
  685. {
  686. unsigned long flags;
  687. spin_lock_irqsave(&cache->lock, flags);
  688. __cell_defer(cache, cell, holder);
  689. spin_unlock_irqrestore(&cache->lock, flags);
  690. wake_worker(cache);
  691. }
  692. static void cleanup_migration(struct dm_cache_migration *mg)
  693. {
  694. struct cache *cache = mg->cache;
  695. free_migration(mg);
  696. dec_nr_migrations(cache);
  697. }
  698. static void migration_failure(struct dm_cache_migration *mg)
  699. {
  700. struct cache *cache = mg->cache;
  701. if (mg->writeback) {
  702. DMWARN_LIMIT("writeback failed; couldn't copy block");
  703. set_dirty(cache, mg->old_oblock, mg->cblock);
  704. cell_defer(cache, mg->old_ocell, false);
  705. } else if (mg->demote) {
  706. DMWARN_LIMIT("demotion failed; couldn't copy block");
  707. policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
  708. cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
  709. if (mg->promote)
  710. cell_defer(cache, mg->new_ocell, true);
  711. } else {
  712. DMWARN_LIMIT("promotion failed; couldn't copy block");
  713. policy_remove_mapping(cache->policy, mg->new_oblock);
  714. cell_defer(cache, mg->new_ocell, true);
  715. }
  716. cleanup_migration(mg);
  717. }
  718. static void migration_success_pre_commit(struct dm_cache_migration *mg)
  719. {
  720. unsigned long flags;
  721. struct cache *cache = mg->cache;
  722. if (mg->writeback) {
  723. cell_defer(cache, mg->old_ocell, false);
  724. clear_dirty(cache, mg->old_oblock, mg->cblock);
  725. cleanup_migration(mg);
  726. return;
  727. } else if (mg->demote) {
  728. if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
  729. DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
  730. policy_force_mapping(cache->policy, mg->new_oblock,
  731. mg->old_oblock);
  732. if (mg->promote)
  733. cell_defer(cache, mg->new_ocell, true);
  734. cleanup_migration(mg);
  735. return;
  736. }
  737. } else {
  738. if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
  739. DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
  740. policy_remove_mapping(cache->policy, mg->new_oblock);
  741. cleanup_migration(mg);
  742. return;
  743. }
  744. }
  745. spin_lock_irqsave(&cache->lock, flags);
  746. list_add_tail(&mg->list, &cache->need_commit_migrations);
  747. cache->commit_requested = true;
  748. spin_unlock_irqrestore(&cache->lock, flags);
  749. }
  750. static void migration_success_post_commit(struct dm_cache_migration *mg)
  751. {
  752. unsigned long flags;
  753. struct cache *cache = mg->cache;
  754. if (mg->writeback) {
  755. DMWARN("writeback unexpectedly triggered commit");
  756. return;
  757. } else if (mg->demote) {
  758. cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
  759. if (mg->promote) {
  760. mg->demote = false;
  761. spin_lock_irqsave(&cache->lock, flags);
  762. list_add_tail(&mg->list, &cache->quiesced_migrations);
  763. spin_unlock_irqrestore(&cache->lock, flags);
  764. } else {
  765. if (mg->invalidate)
  766. policy_remove_mapping(cache->policy, mg->old_oblock);
  767. cleanup_migration(mg);
  768. }
  769. } else {
  770. if (mg->requeue_holder)
  771. cell_defer(cache, mg->new_ocell, true);
  772. else {
  773. bio_endio(mg->new_ocell->holder, 0);
  774. cell_defer(cache, mg->new_ocell, false);
  775. }
  776. clear_dirty(cache, mg->new_oblock, mg->cblock);
  777. cleanup_migration(mg);
  778. }
  779. }
  780. static void copy_complete(int read_err, unsigned long write_err, void *context)
  781. {
  782. unsigned long flags;
  783. struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
  784. struct cache *cache = mg->cache;
  785. if (read_err || write_err)
  786. mg->err = true;
  787. spin_lock_irqsave(&cache->lock, flags);
  788. list_add_tail(&mg->list, &cache->completed_migrations);
  789. spin_unlock_irqrestore(&cache->lock, flags);
  790. wake_worker(cache);
  791. }
  792. static void issue_copy_real(struct dm_cache_migration *mg)
  793. {
  794. int r;
  795. struct dm_io_region o_region, c_region;
  796. struct cache *cache = mg->cache;
  797. o_region.bdev = cache->origin_dev->bdev;
  798. o_region.count = cache->sectors_per_block;
  799. c_region.bdev = cache->cache_dev->bdev;
  800. c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
  801. c_region.count = cache->sectors_per_block;
  802. if (mg->writeback || mg->demote) {
  803. /* demote */
  804. o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
  805. r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
  806. } else {
  807. /* promote */
  808. o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
  809. r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
  810. }
  811. if (r < 0) {
  812. DMERR_LIMIT("issuing migration failed");
  813. migration_failure(mg);
  814. }
  815. }
  816. static void overwrite_endio(struct bio *bio, int err)
  817. {
  818. struct dm_cache_migration *mg = bio->bi_private;
  819. struct cache *cache = mg->cache;
  820. size_t pb_data_size = get_per_bio_data_size(cache);
  821. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  822. unsigned long flags;
  823. if (err)
  824. mg->err = true;
  825. spin_lock_irqsave(&cache->lock, flags);
  826. list_add_tail(&mg->list, &cache->completed_migrations);
  827. dm_unhook_bio(&pb->hook_info, bio);
  828. mg->requeue_holder = false;
  829. spin_unlock_irqrestore(&cache->lock, flags);
  830. wake_worker(cache);
  831. }
  832. static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
  833. {
  834. size_t pb_data_size = get_per_bio_data_size(mg->cache);
  835. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  836. dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
  837. remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
  838. generic_make_request(bio);
  839. }
  840. static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
  841. {
  842. return (bio_data_dir(bio) == WRITE) &&
  843. (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
  844. }
  845. static void avoid_copy(struct dm_cache_migration *mg)
  846. {
  847. atomic_inc(&mg->cache->stats.copies_avoided);
  848. migration_success_pre_commit(mg);
  849. }
  850. static void issue_copy(struct dm_cache_migration *mg)
  851. {
  852. bool avoid;
  853. struct cache *cache = mg->cache;
  854. if (mg->writeback || mg->demote)
  855. avoid = !is_dirty(cache, mg->cblock) ||
  856. is_discarded_oblock(cache, mg->old_oblock);
  857. else {
  858. struct bio *bio = mg->new_ocell->holder;
  859. avoid = is_discarded_oblock(cache, mg->new_oblock);
  860. if (!avoid && bio_writes_complete_block(cache, bio)) {
  861. issue_overwrite(mg, bio);
  862. return;
  863. }
  864. }
  865. avoid ? avoid_copy(mg) : issue_copy_real(mg);
  866. }
  867. static void complete_migration(struct dm_cache_migration *mg)
  868. {
  869. if (mg->err)
  870. migration_failure(mg);
  871. else
  872. migration_success_pre_commit(mg);
  873. }
  874. static void process_migrations(struct cache *cache, struct list_head *head,
  875. void (*fn)(struct dm_cache_migration *))
  876. {
  877. unsigned long flags;
  878. struct list_head list;
  879. struct dm_cache_migration *mg, *tmp;
  880. INIT_LIST_HEAD(&list);
  881. spin_lock_irqsave(&cache->lock, flags);
  882. list_splice_init(head, &list);
  883. spin_unlock_irqrestore(&cache->lock, flags);
  884. list_for_each_entry_safe(mg, tmp, &list, list)
  885. fn(mg);
  886. }
  887. static void __queue_quiesced_migration(struct dm_cache_migration *mg)
  888. {
  889. list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
  890. }
  891. static void queue_quiesced_migration(struct dm_cache_migration *mg)
  892. {
  893. unsigned long flags;
  894. struct cache *cache = mg->cache;
  895. spin_lock_irqsave(&cache->lock, flags);
  896. __queue_quiesced_migration(mg);
  897. spin_unlock_irqrestore(&cache->lock, flags);
  898. wake_worker(cache);
  899. }
  900. static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
  901. {
  902. unsigned long flags;
  903. struct dm_cache_migration *mg, *tmp;
  904. spin_lock_irqsave(&cache->lock, flags);
  905. list_for_each_entry_safe(mg, tmp, work, list)
  906. __queue_quiesced_migration(mg);
  907. spin_unlock_irqrestore(&cache->lock, flags);
  908. wake_worker(cache);
  909. }
  910. static void check_for_quiesced_migrations(struct cache *cache,
  911. struct per_bio_data *pb)
  912. {
  913. struct list_head work;
  914. if (!pb->all_io_entry)
  915. return;
  916. INIT_LIST_HEAD(&work);
  917. if (pb->all_io_entry)
  918. dm_deferred_entry_dec(pb->all_io_entry, &work);
  919. if (!list_empty(&work))
  920. queue_quiesced_migrations(cache, &work);
  921. }
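/*
 * A migration must wait for any in-flight io to its block to complete
 * before the copy starts. If the deferred set has nothing outstanding,
 * the migration is queued as quiesced straight away.
 */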
  922. static void quiesce_migration(struct dm_cache_migration *mg)
  923. {
  924. if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
  925. queue_quiesced_migration(mg);
  926. }
  927. static void promote(struct cache *cache, struct prealloc *structs,
  928. dm_oblock_t oblock, dm_cblock_t cblock,
  929. struct dm_bio_prison_cell *cell)
  930. {
  931. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  932. mg->err = false;
  933. mg->writeback = false;
  934. mg->demote = false;
  935. mg->promote = true;
  936. mg->requeue_holder = true;
  937. mg->invalidate = false;
  938. mg->cache = cache;
  939. mg->new_oblock = oblock;
  940. mg->cblock = cblock;
  941. mg->old_ocell = NULL;
  942. mg->new_ocell = cell;
  943. mg->start_jiffies = jiffies;
  944. inc_nr_migrations(cache);
  945. quiesce_migration(mg);
  946. }
  947. static void writeback(struct cache *cache, struct prealloc *structs,
  948. dm_oblock_t oblock, dm_cblock_t cblock,
  949. struct dm_bio_prison_cell *cell)
  950. {
  951. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  952. mg->err = false;
  953. mg->writeback = true;
  954. mg->demote = false;
  955. mg->promote = false;
  956. mg->requeue_holder = true;
  957. mg->invalidate = false;
  958. mg->cache = cache;
  959. mg->old_oblock = oblock;
  960. mg->cblock = cblock;
  961. mg->old_ocell = cell;
  962. mg->new_ocell = NULL;
  963. mg->start_jiffies = jiffies;
  964. inc_nr_migrations(cache);
  965. quiesce_migration(mg);
  966. }
  967. static void demote_then_promote(struct cache *cache, struct prealloc *structs,
  968. dm_oblock_t old_oblock, dm_oblock_t new_oblock,
  969. dm_cblock_t cblock,
  970. struct dm_bio_prison_cell *old_ocell,
  971. struct dm_bio_prison_cell *new_ocell)
  972. {
  973. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  974. mg->err = false;
  975. mg->writeback = false;
  976. mg->demote = true;
  977. mg->promote = true;
  978. mg->requeue_holder = true;
  979. mg->invalidate = false;
  980. mg->cache = cache;
  981. mg->old_oblock = old_oblock;
  982. mg->new_oblock = new_oblock;
  983. mg->cblock = cblock;
  984. mg->old_ocell = old_ocell;
  985. mg->new_ocell = new_ocell;
  986. mg->start_jiffies = jiffies;
  987. inc_nr_migrations(cache);
  988. quiesce_migration(mg);
  989. }
  990. /*
  991. * Invalidate a cache entry. No writeback occurs; any changes in the cache
  992. * block are thrown away.
  993. */
  994. static void invalidate(struct cache *cache, struct prealloc *structs,
  995. dm_oblock_t oblock, dm_cblock_t cblock,
  996. struct dm_bio_prison_cell *cell)
  997. {
  998. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  999. mg->err = false;
  1000. mg->writeback = false;
  1001. mg->demote = true;
  1002. mg->promote = false;
  1003. mg->requeue_holder = true;
  1004. mg->invalidate = true;
  1005. mg->cache = cache;
  1006. mg->old_oblock = oblock;
  1007. mg->cblock = cblock;
  1008. mg->old_ocell = cell;
  1009. mg->new_ocell = NULL;
  1010. mg->start_jiffies = jiffies;
  1011. inc_nr_migrations(cache);
  1012. quiesce_migration(mg);
  1013. }
  1014. /*----------------------------------------------------------------
  1015. * bio processing
  1016. *--------------------------------------------------------------*/
  1017. static void defer_bio(struct cache *cache, struct bio *bio)
  1018. {
  1019. unsigned long flags;
  1020. spin_lock_irqsave(&cache->lock, flags);
  1021. bio_list_add(&cache->deferred_bios, bio);
  1022. spin_unlock_irqrestore(&cache->lock, flags);
  1023. wake_worker(cache);
  1024. }
  1025. static void process_flush_bio(struct cache *cache, struct bio *bio)
  1026. {
  1027. size_t pb_data_size = get_per_bio_data_size(cache);
  1028. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  1029. BUG_ON(bio->bi_iter.bi_size);
  1030. if (!pb->req_nr)
  1031. remap_to_origin(cache, bio);
  1032. else
  1033. remap_to_cache(cache, bio, 0);
  1034. issue(cache, bio);
  1035. }
  1036. /*
  1037. * People generally discard large parts of a device, eg, the whole device
  1038. * when formatting. Splitting these large discards up into cache block
  1039. * sized ios and then quiescing (always necessary for discard) takes too
  1040. * long.
  1041. *
  1042. * We keep it simple, and allow any size of discard to come in, and just
  1043. * mark off blocks on the discard bitset. No passdown occurs!
  1044. *
  1045. * To implement passdown we need to change the bio_prison such that a cell
  1046. * can have a key that spans many blocks.
  1047. */
  1048. static void process_discard_bio(struct cache *cache, struct bio *bio)
  1049. {
  1050. dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
  1051. cache->discard_block_size);
  1052. dm_block_t end_block = bio_end_sector(bio);
  1053. dm_block_t b;
  1054. end_block = block_div(end_block, cache->discard_block_size);
  1055. for (b = start_block; b < end_block; b++)
  1056. set_discard(cache, to_dblock(b));
  1057. bio_endio(bio, 0);
  1058. }
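/*
 * Migrations are throttled: only start another one if the in-flight
 * volume, including the new migration, stays below migration_threshold
 * (expressed in sectors).
 */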
  1059. static bool spare_migration_bandwidth(struct cache *cache)
  1060. {
  1061. sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
  1062. cache->sectors_per_block;
  1063. return current_volume < cache->migration_threshold;
  1064. }
  1065. static void inc_hit_counter(struct cache *cache, struct bio *bio)
  1066. {
  1067. atomic_inc(bio_data_dir(bio) == READ ?
  1068. &cache->stats.read_hit : &cache->stats.write_hit);
  1069. }
  1070. static void inc_miss_counter(struct cache *cache, struct bio *bio)
  1071. {
  1072. atomic_inc(bio_data_dir(bio) == READ ?
  1073. &cache->stats.read_miss : &cache->stats.write_miss);
  1074. }
  1075. static void issue_cache_bio(struct cache *cache, struct bio *bio,
  1076. struct per_bio_data *pb,
  1077. dm_oblock_t oblock, dm_cblock_t cblock)
  1078. {
  1079. pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
  1080. remap_to_cache_dirty(cache, bio, oblock, cblock);
  1081. issue(cache, bio);
  1082. }
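/*
 * Decide what to do with a deferred bio: detain it in a prison cell so
 * it can't race with a migration, then act on the policy's verdict
 * (hit, miss, promote a new block, or demote-then-promote a
 * replacement).
 */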
  1083. static void process_bio(struct cache *cache, struct prealloc *structs,
  1084. struct bio *bio)
  1085. {
  1086. int r;
  1087. bool release_cell = true;
  1088. dm_oblock_t block = get_bio_block(cache, bio);
  1089. struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
  1090. struct policy_result lookup_result;
  1091. size_t pb_data_size = get_per_bio_data_size(cache);
  1092. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  1093. bool discarded_block = is_discarded_oblock(cache, block);
  1094. bool passthrough = passthrough_mode(&cache->features);
  1095. bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
  1096. /*
  1097. * Check to see if that block is currently migrating.
  1098. */
  1099. cell_prealloc = prealloc_get_cell(structs);
  1100. r = bio_detain(cache, block, bio, cell_prealloc,
  1101. (cell_free_fn) prealloc_put_cell,
  1102. structs, &new_ocell);
  1103. if (r > 0)
  1104. return;
  1105. r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
  1106. bio, &lookup_result);
  1107. if (r == -EWOULDBLOCK)
  1108. /* migration has been denied */
  1109. lookup_result.op = POLICY_MISS;
  1110. switch (lookup_result.op) {
  1111. case POLICY_HIT:
  1112. if (passthrough) {
  1113. inc_miss_counter(cache, bio);
  1114. /*
  1115. * Passthrough always maps to the origin,
  1116. * invalidating any cache blocks that are written
  1117. * to.
  1118. */
  1119. if (bio_data_dir(bio) == WRITE) {
  1120. atomic_inc(&cache->stats.demotion);
  1121. invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
  1122. release_cell = false;
  1123. } else {
  1124. /* FIXME: factor out issue_origin() */
  1125. pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
  1126. remap_to_origin_clear_discard(cache, bio, block);
  1127. issue(cache, bio);
  1128. }
  1129. } else {
  1130. inc_hit_counter(cache, bio);
  1131. if (bio_data_dir(bio) == WRITE &&
  1132. writethrough_mode(&cache->features) &&
  1133. !is_dirty(cache, lookup_result.cblock)) {
  1134. pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
  1135. remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
  1136. issue(cache, bio);
  1137. } else
  1138. issue_cache_bio(cache, bio, pb, block, lookup_result.cblock);
  1139. }
  1140. break;
  1141. case POLICY_MISS:
  1142. inc_miss_counter(cache, bio);
  1143. pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
  1144. remap_to_origin_clear_discard(cache, bio, block);
  1145. issue(cache, bio);
  1146. break;
  1147. case POLICY_NEW:
  1148. atomic_inc(&cache->stats.promotion);
  1149. promote(cache, structs, block, lookup_result.cblock, new_ocell);
  1150. release_cell = false;
  1151. break;
  1152. case POLICY_REPLACE:
  1153. cell_prealloc = prealloc_get_cell(structs);
  1154. r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
  1155. (cell_free_fn) prealloc_put_cell,
  1156. structs, &old_ocell);
  1157. if (r > 0) {
  1158. /*
  1159. * We have to be careful to avoid lock inversion of
  1160. * the cells. So we back off, and wait for the
  1161. * old_ocell to become free.
  1162. */
  1163. policy_force_mapping(cache->policy, block,
  1164. lookup_result.old_oblock);
  1165. atomic_inc(&cache->stats.cache_cell_clash);
  1166. break;
  1167. }
  1168. atomic_inc(&cache->stats.demotion);
  1169. atomic_inc(&cache->stats.promotion);
  1170. demote_then_promote(cache, structs, lookup_result.old_oblock,
  1171. block, lookup_result.cblock,
  1172. old_ocell, new_ocell);
  1173. release_cell = false;
  1174. break;
  1175. default:
  1176. DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
  1177. (unsigned) lookup_result.op);
  1178. bio_io_error(bio);
  1179. }
  1180. if (release_cell)
  1181. cell_defer(cache, new_ocell, false);
  1182. }
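/*
 * Commit metadata at least once per COMMIT_PERIOD. The first clause
 * also forces a commit if jiffies has wrapped since the last commit.
 */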
  1183. static int need_commit_due_to_time(struct cache *cache)
  1184. {
  1185. return jiffies < cache->last_commit_jiffies ||
  1186. jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
  1187. }
  1188. static int commit_if_needed(struct cache *cache)
  1189. {
  1190. int r = 0;
  1191. if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
  1192. dm_cache_changed_this_transaction(cache->cmd)) {
  1193. atomic_inc(&cache->stats.commit_count);
  1194. cache->commit_requested = false;
  1195. r = dm_cache_commit(cache->cmd, false);
  1196. cache->last_commit_jiffies = jiffies;
  1197. }
  1198. return r;
  1199. }
  1200. static void process_deferred_bios(struct cache *cache)
  1201. {
  1202. unsigned long flags;
  1203. struct bio_list bios;
  1204. struct bio *bio;
  1205. struct prealloc structs;
  1206. memset(&structs, 0, sizeof(structs));
  1207. bio_list_init(&bios);
  1208. spin_lock_irqsave(&cache->lock, flags);
  1209. bio_list_merge(&bios, &cache->deferred_bios);
  1210. bio_list_init(&cache->deferred_bios);
  1211. spin_unlock_irqrestore(&cache->lock, flags);
  1212. while (!bio_list_empty(&bios)) {
  1213. /*
  1214. * If we've got no free migration structs, and processing
  1215. * this bio might require one, we pause until there are some
  1216. * prepared mappings to process.
  1217. */
  1218. if (prealloc_data_structs(cache, &structs)) {
  1219. spin_lock_irqsave(&cache->lock, flags);
  1220. bio_list_merge(&cache->deferred_bios, &bios);
  1221. spin_unlock_irqrestore(&cache->lock, flags);
  1222. break;
  1223. }
  1224. bio = bio_list_pop(&bios);
  1225. if (bio->bi_rw & REQ_FLUSH)
  1226. process_flush_bio(cache, bio);
  1227. else if (bio->bi_rw & REQ_DISCARD)
  1228. process_discard_bio(cache, bio);
  1229. else
  1230. process_bio(cache, &structs, bio);
  1231. }
  1232. prealloc_free_structs(cache, &structs);
  1233. }
  1234. static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
  1235. {
  1236. unsigned long flags;
  1237. struct bio_list bios;
  1238. struct bio *bio;
  1239. bio_list_init(&bios);
  1240. spin_lock_irqsave(&cache->lock, flags);
  1241. bio_list_merge(&bios, &cache->deferred_flush_bios);
  1242. bio_list_init(&cache->deferred_flush_bios);
  1243. spin_unlock_irqrestore(&cache->lock, flags);
  1244. while ((bio = bio_list_pop(&bios)))
  1245. submit_bios ? generic_make_request(bio) : bio_io_error(bio);
  1246. }
  1247. static void process_deferred_writethrough_bios(struct cache *cache)
  1248. {
  1249. unsigned long flags;
  1250. struct bio_list bios;
  1251. struct bio *bio;
  1252. bio_list_init(&bios);
  1253. spin_lock_irqsave(&cache->lock, flags);
  1254. bio_list_merge(&bios, &cache->deferred_writethrough_bios);
  1255. bio_list_init(&cache->deferred_writethrough_bios);
  1256. spin_unlock_irqrestore(&cache->lock, flags);
  1257. while ((bio = bio_list_pop(&bios)))
  1258. generic_make_request(bio);
  1259. }
  1260. static void writeback_some_dirty_blocks(struct cache *cache)
  1261. {
  1262. int r = 0;
  1263. dm_oblock_t oblock;
  1264. dm_cblock_t cblock;
  1265. struct prealloc structs;
  1266. struct dm_bio_prison_cell *old_ocell;
  1267. memset(&structs, 0, sizeof(structs));
  1268. while (spare_migration_bandwidth(cache)) {
  1269. if (prealloc_data_structs(cache, &structs))
  1270. break;
  1271. r = policy_writeback_work(cache->policy, &oblock, &cblock);
  1272. if (r)
  1273. break;
  1274. r = get_cell(cache, oblock, &structs, &old_ocell);
  1275. if (r) {
  1276. policy_set_dirty(cache->policy, oblock);
  1277. break;
  1278. }
  1279. writeback(cache, &structs, oblock, cblock, old_ocell);
  1280. }
  1281. prealloc_free_structs(cache, &structs);
  1282. }
  1283. /*----------------------------------------------------------------
  1284. * Invalidations.
  1285. * Dropping something from the cache *without* writing back.
  1286. *--------------------------------------------------------------*/
  1287. static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
  1288. {
  1289. int r = 0;
  1290. uint64_t begin = from_cblock(req->cblocks->begin);
  1291. uint64_t end = from_cblock(req->cblocks->end);
  1292. while (begin != end) {
  1293. r = policy_remove_cblock(cache->policy, to_cblock(begin));
  1294. if (!r) {
  1295. r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
  1296. if (r)
  1297. break;
  1298. } else if (r == -ENODATA) {
  1299. /* harmless, already unmapped */
  1300. r = 0;
  1301. } else {
  1302. DMERR("policy_remove_cblock failed");
  1303. break;
  1304. }
  1305. begin++;
  1306. }
  1307. cache->commit_requested = true;
  1308. req->err = r;
  1309. atomic_set(&req->complete, 1);
  1310. wake_up(&req->result_wait);
  1311. }
  1312. static void process_invalidation_requests(struct cache *cache)
  1313. {
  1314. struct list_head list;
  1315. struct invalidation_request *req, *tmp;
  1316. INIT_LIST_HEAD(&list);
  1317. spin_lock(&cache->invalidation_lock);
  1318. list_splice_init(&cache->invalidation_requests, &list);
  1319. spin_unlock(&cache->invalidation_lock);
  1320. list_for_each_entry_safe (req, tmp, &list, list)
  1321. process_invalidation_request(cache, req);
  1322. }
  1323. /*----------------------------------------------------------------
  1324. * Main worker loop
  1325. *--------------------------------------------------------------*/
  1326. static bool is_quiescing(struct cache *cache)
  1327. {
  1328. return atomic_read(&cache->quiescing);
  1329. }
  1330. static void ack_quiescing(struct cache *cache)
  1331. {
  1332. if (is_quiescing(cache)) {
  1333. atomic_inc(&cache->quiescing_ack);
  1334. wake_up(&cache->quiescing_wait);
  1335. }
  1336. }
  1337. static void wait_for_quiescing_ack(struct cache *cache)
  1338. {
  1339. wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
  1340. }
  1341. static void start_quiescing(struct cache *cache)
  1342. {
  1343. atomic_inc(&cache->quiescing);
  1344. wait_for_quiescing_ack(cache);
  1345. }
  1346. static void stop_quiescing(struct cache *cache)
  1347. {
  1348. atomic_set(&cache->quiescing, 0);
  1349. atomic_set(&cache->quiescing_ack, 0);
  1350. }
  1351. static void wait_for_migrations(struct cache *cache)
  1352. {
  1353. wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
  1354. }
  1355. static void stop_worker(struct cache *cache)
  1356. {
  1357. cancel_delayed_work(&cache->waker);
  1358. flush_workqueue(cache->wq);
  1359. }
  1360. static void requeue_deferred_io(struct cache *cache)
  1361. {
  1362. struct bio *bio;
  1363. struct bio_list bios;
  1364. bio_list_init(&bios);
  1365. bio_list_merge(&bios, &cache->deferred_bios);
  1366. bio_list_init(&cache->deferred_bios);
  1367. while ((bio = bio_list_pop(&bios)))
  1368. bio_endio(bio, DM_ENDIO_REQUEUE);
  1369. }
  1370. static int more_work(struct cache *cache)
  1371. {
  1372. if (is_quiescing(cache))
  1373. return !list_empty(&cache->quiesced_migrations) ||
  1374. !list_empty(&cache->completed_migrations) ||
  1375. !list_empty(&cache->need_commit_migrations);
  1376. else
  1377. return !bio_list_empty(&cache->deferred_bios) ||
  1378. !bio_list_empty(&cache->deferred_flush_bios) ||
  1379. !bio_list_empty(&cache->deferred_writethrough_bios) ||
  1380. !list_empty(&cache->quiesced_migrations) ||
  1381. !list_empty(&cache->completed_migrations) ||
  1382. !list_empty(&cache->need_commit_migrations) ||
  1383. cache->invalidate;
  1384. }
  1385. static void do_worker(struct work_struct *ws)
  1386. {
  1387. struct cache *cache = container_of(ws, struct cache, worker);
  1388. do {
  1389. if (!is_quiescing(cache)) {
  1390. writeback_some_dirty_blocks(cache);
  1391. process_deferred_writethrough_bios(cache);
  1392. process_deferred_bios(cache);
  1393. process_invalidation_requests(cache);
  1394. }
  1395. process_migrations(cache, &cache->quiesced_migrations, issue_copy);
  1396. process_migrations(cache, &cache->completed_migrations, complete_migration);
  1397. if (commit_if_needed(cache)) {
  1398. process_deferred_flush_bios(cache, false);
  1399. /*
  1400. * FIXME: rollback metadata or just go into a
  1401. * failure mode and error everything
  1402. */
  1403. } else {
  1404. process_deferred_flush_bios(cache, true);
  1405. process_migrations(cache, &cache->need_commit_migrations,
  1406. migration_success_post_commit);
  1407. }
  1408. ack_quiescing(cache);
  1409. } while (more_work(cache));
  1410. }
  1411. /*
  1412. * We want to commit periodically so that not too much
  1413. * unwritten metadata builds up.
  1414. */
  1415. static void do_waker(struct work_struct *ws)
  1416. {
  1417. struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
  1418. policy_tick(cache->policy);
  1419. wake_worker(cache);
  1420. queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
  1421. }
  1422. /*----------------------------------------------------------------*/
  1423. static int is_congested(struct dm_dev *dev, int bdi_bits)
  1424. {
  1425. struct request_queue *q = bdev_get_queue(dev->bdev);
  1426. return bdi_congested(&q->backing_dev_info, bdi_bits);
  1427. }
  1428. static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
  1429. {
  1430. struct cache *cache = container_of(cb, struct cache, callbacks);
  1431. return is_congested(cache->origin_dev, bdi_bits) ||
  1432. is_congested(cache->cache_dev, bdi_bits);
  1433. }
  1434. /*----------------------------------------------------------------
  1435. * Target methods
  1436. *--------------------------------------------------------------*/
  1437. /*
  1438. * This function gets called on the error paths of the constructor, so we
  1439. * have to cope with a partially initialised struct.
  1440. */
  1441. static void destroy(struct cache *cache)
  1442. {
  1443. unsigned i;
  1444. if (cache->next_migration)
  1445. mempool_free(cache->next_migration, cache->migration_pool);
  1446. if (cache->migration_pool)
  1447. mempool_destroy(cache->migration_pool);
  1448. if (cache->all_io_ds)
  1449. dm_deferred_set_destroy(cache->all_io_ds);
  1450. if (cache->prison)
  1451. dm_bio_prison_destroy(cache->prison);
  1452. if (cache->wq)
  1453. destroy_workqueue(cache->wq);
  1454. if (cache->dirty_bitset)
  1455. free_bitset(cache->dirty_bitset);
  1456. if (cache->discard_bitset)
  1457. free_bitset(cache->discard_bitset);
  1458. if (cache->copier)
  1459. dm_kcopyd_client_destroy(cache->copier);
  1460. if (cache->cmd)
  1461. dm_cache_metadata_close(cache->cmd);
  1462. if (cache->metadata_dev)
  1463. dm_put_device(cache->ti, cache->metadata_dev);
  1464. if (cache->origin_dev)
  1465. dm_put_device(cache->ti, cache->origin_dev);
  1466. if (cache->cache_dev)
  1467. dm_put_device(cache->ti, cache->cache_dev);
  1468. if (cache->policy)
  1469. dm_cache_policy_destroy(cache->policy);
  1470. for (i = 0; i < cache->nr_ctr_args ; i++)
  1471. kfree(cache->ctr_args[i]);
  1472. kfree(cache->ctr_args);
  1473. kfree(cache);
  1474. }
  1475. static void cache_dtr(struct dm_target *ti)
  1476. {
  1477. struct cache *cache = ti->private;
  1478. destroy(cache);
  1479. }
  1480. static sector_t get_dev_size(struct dm_dev *dev)
  1481. {
  1482. return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
  1483. }
  1484. /*----------------------------------------------------------------*/
  1485. /*
  1486. * Construct a cache device mapping.
  1487. *
  1488. * cache <metadata dev> <cache dev> <origin dev> <block size>
  1489. * <#feature args> [<feature arg>]*
  1490. * <policy> <#policy args> [<policy arg>]*
  1491. *
  1492. * metadata dev : fast device holding the persistent metadata
  1493. * cache dev : fast device holding cached data blocks
  1494. * origin dev : slow device holding original data blocks
  1495. * block size : cache unit size in sectors
  1496. *
  1497. * #feature args : number of feature arguments passed
  1498. * feature args : writethrough. (The default is writeback.)
  1499. *
  1500. * policy : the replacement policy to use
  1501. * #policy args : an even number of policy arguments corresponding
  1502. * to key/value pairs passed to the policy
  1503. * policy args : key/value pairs passed to the policy
  1504. * E.g. 'sequential_threshold 1024'
  1505. * See cache-policies.txt for details.
  1506. *
  1507. * Optional feature arguments are:
  1508. * writethrough : write through caching that prohibits cache block
  1509. * content from being different from origin block content.
  1510. * Without this argument, the default behaviour is to write
  1511. * back cache block contents later for performance reasons,
  1512. * so they may differ from the corresponding origin blocks.
  1513. */
  1514. struct cache_args {
  1515. struct dm_target *ti;
  1516. struct dm_dev *metadata_dev;
  1517. struct dm_dev *cache_dev;
  1518. sector_t cache_sectors;
  1519. struct dm_dev *origin_dev;
  1520. sector_t origin_sectors;
  1521. uint32_t block_size;
  1522. const char *policy_name;
  1523. int policy_argc;
  1524. const char **policy_argv;
  1525. struct cache_features features;
  1526. };
  1527. static void destroy_cache_args(struct cache_args *ca)
  1528. {
  1529. if (ca->metadata_dev)
  1530. dm_put_device(ca->ti, ca->metadata_dev);
  1531. if (ca->cache_dev)
  1532. dm_put_device(ca->ti, ca->cache_dev);
  1533. if (ca->origin_dev)
  1534. dm_put_device(ca->ti, ca->origin_dev);
  1535. kfree(ca);
  1536. }
  1537. static bool at_least_one_arg(struct dm_arg_set *as, char **error)
  1538. {
  1539. if (!as->argc) {
  1540. *error = "Insufficient args";
  1541. return false;
  1542. }
  1543. return true;
  1544. }
  1545. static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
  1546. char **error)
  1547. {
  1548. int r;
  1549. sector_t metadata_dev_size;
  1550. char b[BDEVNAME_SIZE];
  1551. if (!at_least_one_arg(as, error))
  1552. return -EINVAL;
  1553. r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
  1554. &ca->metadata_dev);
  1555. if (r) {
  1556. *error = "Error opening metadata device";
  1557. return r;
  1558. }
  1559. metadata_dev_size = get_dev_size(ca->metadata_dev);
  1560. if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
  1561. DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
  1562. bdevname(ca->metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
  1563. return 0;
  1564. }
  1565. static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
  1566. char **error)
  1567. {
  1568. int r;
  1569. if (!at_least_one_arg(as, error))
  1570. return -EINVAL;
  1571. r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
  1572. &ca->cache_dev);
  1573. if (r) {
  1574. *error = "Error opening cache device";
  1575. return r;
  1576. }
  1577. ca->cache_sectors = get_dev_size(ca->cache_dev);
  1578. return 0;
  1579. }
  1580. static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
  1581. char **error)
  1582. {
  1583. int r;
  1584. if (!at_least_one_arg(as, error))
  1585. return -EINVAL;
  1586. r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
  1587. &ca->origin_dev);
  1588. if (r) {
  1589. *error = "Error opening origin device";
  1590. return r;
  1591. }
  1592. ca->origin_sectors = get_dev_size(ca->origin_dev);
  1593. if (ca->ti->len > ca->origin_sectors) {
  1594. *error = "Device size larger than cached device";
  1595. return -EINVAL;
  1596. }
  1597. return 0;
  1598. }
  1599. static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
  1600. char **error)
  1601. {
  1602. unsigned long block_size;
  1603. if (!at_least_one_arg(as, error))
  1604. return -EINVAL;
  1605. if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
  1606. block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
  1607. block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
  1608. block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
  1609. *error = "Invalid data block size";
  1610. return -EINVAL;
  1611. }
  1612. if (block_size > ca->cache_sectors) {
  1613. *error = "Data block size is larger than the cache device";
  1614. return -EINVAL;
  1615. }
  1616. ca->block_size = block_size;
  1617. return 0;
  1618. }
  1619. static void init_features(struct cache_features *cf)
  1620. {
  1621. cf->mode = CM_WRITE;
  1622. cf->io_mode = CM_IO_WRITEBACK;
  1623. }
  1624. static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
  1625. char **error)
  1626. {
  1627. static struct dm_arg _args[] = {
  1628. {0, 1, "Invalid number of cache feature arguments"},
  1629. };
  1630. int r;
  1631. unsigned argc;
  1632. const char *arg;
  1633. struct cache_features *cf = &ca->features;
  1634. init_features(cf);
  1635. r = dm_read_arg_group(_args, as, &argc, error);
  1636. if (r)
  1637. return -EINVAL;
  1638. while (argc--) {
  1639. arg = dm_shift_arg(as);
  1640. if (!strcasecmp(arg, "writeback"))
  1641. cf->io_mode = CM_IO_WRITEBACK;
  1642. else if (!strcasecmp(arg, "writethrough"))
  1643. cf->io_mode = CM_IO_WRITETHROUGH;
  1644. else if (!strcasecmp(arg, "passthrough"))
  1645. cf->io_mode = CM_IO_PASSTHROUGH;
  1646. else {
  1647. *error = "Unrecognised cache feature requested";
  1648. return -EINVAL;
  1649. }
  1650. }
  1651. return 0;
  1652. }
  1653. static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
  1654. char **error)
  1655. {
  1656. static struct dm_arg _args[] = {
  1657. {0, 1024, "Invalid number of policy arguments"},
  1658. };
  1659. int r;
  1660. if (!at_least_one_arg(as, error))
  1661. return -EINVAL;
  1662. ca->policy_name = dm_shift_arg(as);
  1663. r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
  1664. if (r)
  1665. return -EINVAL;
  1666. ca->policy_argv = (const char **)as->argv;
  1667. dm_consume_args(as, ca->policy_argc);
  1668. return 0;
  1669. }
  1670. static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
  1671. char **error)
  1672. {
  1673. int r;
  1674. struct dm_arg_set as;
  1675. as.argc = argc;
  1676. as.argv = argv;
  1677. r = parse_metadata_dev(ca, &as, error);
  1678. if (r)
  1679. return r;
  1680. r = parse_cache_dev(ca, &as, error);
  1681. if (r)
  1682. return r;
  1683. r = parse_origin_dev(ca, &as, error);
  1684. if (r)
  1685. return r;
  1686. r = parse_block_size(ca, &as, error);
  1687. if (r)
  1688. return r;
  1689. r = parse_features(ca, &as, error);
  1690. if (r)
  1691. return r;
  1692. r = parse_policy(ca, &as, error);
  1693. if (r)
  1694. return r;
  1695. return 0;
  1696. }
  1697. /*----------------------------------------------------------------*/
  1698. static struct kmem_cache *migration_cache;
  1699. #define NOT_CORE_OPTION 1
  1700. static int process_config_option(struct cache *cache, const char *key, const char *value)
  1701. {
  1702. unsigned long tmp;
  1703. if (!strcasecmp(key, "migration_threshold")) {
  1704. if (kstrtoul(value, 10, &tmp))
  1705. return -EINVAL;
  1706. cache->migration_threshold = tmp;
  1707. return 0;
  1708. }
  1709. return NOT_CORE_OPTION;
  1710. }
  1711. static int set_config_value(struct cache *cache, const char *key, const char *value)
  1712. {
  1713. int r = process_config_option(cache, key, value);
  1714. if (r == NOT_CORE_OPTION)
  1715. r = policy_set_config_value(cache->policy, key, value);
  1716. if (r)
  1717. DMWARN("bad config value for %s: %s", key, value);
  1718. return r;
  1719. }
  1720. static int set_config_values(struct cache *cache, int argc, const char **argv)
  1721. {
  1722. int r = 0;
  1723. if (argc & 1) {
  1724. DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
  1725. return -EINVAL;
  1726. }
  1727. while (argc) {
  1728. r = set_config_value(cache, argv[0], argv[1]);
  1729. if (r)
  1730. break;
  1731. argc -= 2;
  1732. argv += 2;
  1733. }
  1734. return r;
  1735. }
  1736. static int create_cache_policy(struct cache *cache, struct cache_args *ca,
  1737. char **error)
  1738. {
  1739. struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
  1740. cache->cache_size,
  1741. cache->origin_sectors,
  1742. cache->sectors_per_block);
  1743. if (IS_ERR(p)) {
  1744. *error = "Error creating cache's policy";
  1745. return PTR_ERR(p);
  1746. }
  1747. cache->policy = p;
  1748. return 0;
  1749. }
  1750. /*
  1751. * We want the discard block size to be a power of two, at least the size
  1752. * of the cache block size, and have no more than 2^14 discard blocks
  1753. * across the origin.
  1754. */
  1755. #define MAX_DISCARD_BLOCKS (1 << 14)
  1756. static bool too_many_discard_blocks(sector_t discard_block_size,
  1757. sector_t origin_size)
  1758. {
  1759. (void) sector_div(origin_size, discard_block_size);
  1760. return origin_size > MAX_DISCARD_BLOCKS;
  1761. }
  1762. static sector_t calculate_discard_block_size(sector_t cache_block_size,
  1763. sector_t origin_size)
  1764. {
  1765. sector_t discard_block_size;
  1766. discard_block_size = roundup_pow_of_two(cache_block_size);
  1767. if (origin_size)
  1768. while (too_many_discard_blocks(discard_block_size, origin_size))
  1769. discard_block_size *= 2;
  1770. return discard_block_size;
  1771. }
  1772. #define DEFAULT_MIGRATION_THRESHOLD 2048
  1773. static int cache_create(struct cache_args *ca, struct cache **result)
  1774. {
  1775. int r = 0;
  1776. char **error = &ca->ti->error;
  1777. struct cache *cache;
  1778. struct dm_target *ti = ca->ti;
  1779. dm_block_t origin_blocks;
  1780. struct dm_cache_metadata *cmd;
  1781. bool may_format = ca->features.mode == CM_WRITE;
  1782. cache = kzalloc(sizeof(*cache), GFP_KERNEL);
  1783. if (!cache)
  1784. return -ENOMEM;
  1785. cache->ti = ca->ti;
  1786. ti->private = cache;
  1787. ti->num_flush_bios = 2;
  1788. ti->flush_supported = true;
  1789. ti->num_discard_bios = 1;
  1790. ti->discards_supported = true;
  1791. ti->discard_zeroes_data_unsupported = true;
  1792. cache->features = ca->features;
  1793. ti->per_bio_data_size = get_per_bio_data_size(cache);
  1794. cache->callbacks.congested_fn = cache_is_congested;
  1795. dm_table_add_target_callbacks(ti->table, &cache->callbacks);
  1796. cache->metadata_dev = ca->metadata_dev;
  1797. cache->origin_dev = ca->origin_dev;
  1798. cache->cache_dev = ca->cache_dev;
  1799. ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
  1800. /* FIXME: factor out this whole section */
  1801. origin_blocks = cache->origin_sectors = ca->origin_sectors;
  1802. origin_blocks = block_div(origin_blocks, ca->block_size);
  1803. cache->origin_blocks = to_oblock(origin_blocks);
  1804. cache->sectors_per_block = ca->block_size;
  1805. if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
  1806. r = -EINVAL;
  1807. goto bad;
  1808. }
  1809. if (ca->block_size & (ca->block_size - 1)) {
  1810. dm_block_t cache_size = ca->cache_sectors;
  1811. cache->sectors_per_block_shift = -1;
  1812. cache_size = block_div(cache_size, ca->block_size);
  1813. cache->cache_size = to_cblock(cache_size);
  1814. } else {
  1815. cache->sectors_per_block_shift = __ffs(ca->block_size);
  1816. cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
  1817. }
  1818. r = create_cache_policy(cache, ca, error);
  1819. if (r)
  1820. goto bad;
  1821. cache->policy_nr_args = ca->policy_argc;
  1822. cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
  1823. r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
  1824. if (r) {
  1825. *error = "Error setting cache policy's config values";
  1826. goto bad;
  1827. }
  1828. cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
  1829. ca->block_size, may_format,
  1830. dm_cache_policy_get_hint_size(cache->policy));
  1831. if (IS_ERR(cmd)) {
  1832. *error = "Error creating metadata object";
  1833. r = PTR_ERR(cmd);
  1834. goto bad;
  1835. }
  1836. cache->cmd = cmd;
  1837. if (passthrough_mode(&cache->features)) {
  1838. bool all_clean;
  1839. r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
  1840. if (r) {
  1841. *error = "dm_cache_metadata_all_clean() failed";
  1842. goto bad;
  1843. }
  1844. if (!all_clean) {
  1845. *error = "Cannot enter passthrough mode unless all blocks are clean";
  1846. r = -EINVAL;
  1847. goto bad;
  1848. }
  1849. }
  1850. spin_lock_init(&cache->lock);
  1851. bio_list_init(&cache->deferred_bios);
  1852. bio_list_init(&cache->deferred_flush_bios);
  1853. bio_list_init(&cache->deferred_writethrough_bios);
  1854. INIT_LIST_HEAD(&cache->quiesced_migrations);
  1855. INIT_LIST_HEAD(&cache->completed_migrations);
  1856. INIT_LIST_HEAD(&cache->need_commit_migrations);
  1857. atomic_set(&cache->nr_migrations, 0);
  1858. init_waitqueue_head(&cache->migration_wait);
  1859. init_waitqueue_head(&cache->quiescing_wait);
  1860. atomic_set(&cache->quiescing, 0);
  1861. atomic_set(&cache->quiescing_ack, 0);
  1862. r = -ENOMEM;
  1863. cache->nr_dirty = 0;
  1864. cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
  1865. if (!cache->dirty_bitset) {
  1866. *error = "could not allocate dirty bitset";
  1867. goto bad;
  1868. }
  1869. clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
  1870. cache->discard_block_size =
  1871. calculate_discard_block_size(cache->sectors_per_block,
  1872. cache->origin_sectors);
  1873. cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
  1874. cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
  1875. if (!cache->discard_bitset) {
  1876. *error = "could not allocate discard bitset";
  1877. goto bad;
  1878. }
  1879. clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
  1880. cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
  1881. if (IS_ERR(cache->copier)) {
  1882. *error = "could not create kcopyd client";
  1883. r = PTR_ERR(cache->copier);
  1884. goto bad;
  1885. }
  1886. cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
  1887. if (!cache->wq) {
  1888. *error = "could not create workqueue for metadata object";
  1889. goto bad;
  1890. }
  1891. INIT_WORK(&cache->worker, do_worker);
  1892. INIT_DELAYED_WORK(&cache->waker, do_waker);
  1893. cache->last_commit_jiffies = jiffies;
  1894. cache->prison = dm_bio_prison_create(PRISON_CELLS);
  1895. if (!cache->prison) {
  1896. *error = "could not create bio prison";
  1897. goto bad;
  1898. }
  1899. cache->all_io_ds = dm_deferred_set_create();
  1900. if (!cache->all_io_ds) {
  1901. *error = "could not create all_io deferred set";
  1902. goto bad;
  1903. }
  1904. cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
  1905. migration_cache);
  1906. if (!cache->migration_pool) {
  1907. *error = "Error creating cache's migration mempool";
  1908. goto bad;
  1909. }
  1910. cache->next_migration = NULL;
  1911. cache->need_tick_bio = true;
  1912. cache->sized = false;
  1913. cache->invalidate = false;
  1914. cache->commit_requested = false;
  1915. cache->loaded_mappings = false;
  1916. cache->loaded_discards = false;
  1917. load_stats(cache);
  1918. atomic_set(&cache->stats.demotion, 0);
  1919. atomic_set(&cache->stats.promotion, 0);
  1920. atomic_set(&cache->stats.copies_avoided, 0);
  1921. atomic_set(&cache->stats.cache_cell_clash, 0);
  1922. atomic_set(&cache->stats.commit_count, 0);
  1923. atomic_set(&cache->stats.discard_count, 0);
  1924. spin_lock_init(&cache->invalidation_lock);
  1925. INIT_LIST_HEAD(&cache->invalidation_requests);
  1926. *result = cache;
  1927. return 0;
  1928. bad:
  1929. destroy(cache);
  1930. return r;
  1931. }
  1932. static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
  1933. {
  1934. unsigned i;
  1935. const char **copy;
  1936. copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
  1937. if (!copy)
  1938. return -ENOMEM;
  1939. for (i = 0; i < argc; i++) {
  1940. copy[i] = kstrdup(argv[i], GFP_KERNEL);
  1941. if (!copy[i]) {
  1942. while (i--)
  1943. kfree(copy[i]);
  1944. kfree(copy);
  1945. return -ENOMEM;
  1946. }
  1947. }
  1948. cache->nr_ctr_args = argc;
  1949. cache->ctr_args = copy;
  1950. return 0;
  1951. }
  1952. static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
  1953. {
  1954. int r = -EINVAL;
  1955. struct cache_args *ca;
  1956. struct cache *cache = NULL;
  1957. ca = kzalloc(sizeof(*ca), GFP_KERNEL);
  1958. if (!ca) {
  1959. ti->error = "Error allocating memory for cache";
  1960. return -ENOMEM;
  1961. }
  1962. ca->ti = ti;
  1963. r = parse_cache_args(ca, argc, argv, &ti->error);
  1964. if (r)
  1965. goto out;
  1966. r = cache_create(ca, &cache);
  1967. if (r)
  1968. goto out;
  1969. r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
  1970. if (r) {
  1971. destroy(cache);
  1972. goto out;
  1973. }
  1974. ti->private = cache;
  1975. out:
  1976. destroy_cache_args(ca);
  1977. return r;
  1978. }
  1979. static int cache_map(struct dm_target *ti, struct bio *bio)
  1980. {
  1981. struct cache *cache = ti->private;
  1982. int r;
  1983. dm_oblock_t block = get_bio_block(cache, bio);
  1984. size_t pb_data_size = get_per_bio_data_size(cache);
  1985. bool can_migrate = false;
  1986. bool discarded_block;
  1987. struct dm_bio_prison_cell *cell;
  1988. struct policy_result lookup_result;
  1989. struct per_bio_data *pb;
  1990. if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
  1991. /*
  1992. * This can only occur if the io goes to a partial block at
  1993. * the end of the origin device. We don't cache these.
  1994. * Just remap to the origin and carry on.
  1995. */
  1996. remap_to_origin_clear_discard(cache, bio, block);
  1997. return DM_MAPIO_REMAPPED;
  1998. }
  1999. pb = init_per_bio_data(bio, pb_data_size);
  2000. if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
  2001. defer_bio(cache, bio);
  2002. return DM_MAPIO_SUBMITTED;
  2003. }
  2004. /*
  2005. * Check to see if that block is currently migrating.
  2006. */
  2007. cell = alloc_prison_cell(cache);
  2008. if (!cell) {
  2009. defer_bio(cache, bio);
  2010. return DM_MAPIO_SUBMITTED;
  2011. }
  2012. r = bio_detain(cache, block, bio, cell,
  2013. (cell_free_fn) free_prison_cell,
  2014. cache, &cell);
  2015. if (r) {
  2016. if (r < 0)
  2017. defer_bio(cache, bio);
  2018. return DM_MAPIO_SUBMITTED;
  2019. }
  2020. discarded_block = is_discarded_oblock(cache, block);
  2021. r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
  2022. bio, &lookup_result);
  2023. if (r == -EWOULDBLOCK) {
  2024. cell_defer(cache, cell, true);
  2025. return DM_MAPIO_SUBMITTED;
  2026. } else if (r) {
  2027. DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
  2028. bio_io_error(bio);
  2029. return DM_MAPIO_SUBMITTED;
  2030. }
  2031. r = DM_MAPIO_REMAPPED;
  2032. switch (lookup_result.op) {
  2033. case POLICY_HIT:
  2034. if (passthrough_mode(&cache->features)) {
  2035. if (bio_data_dir(bio) == WRITE) {
  2036. /*
  2037. * We need to invalidate this block, so
  2038. * defer for the worker thread.
  2039. */
  2040. cell_defer(cache, cell, true);
  2041. r = DM_MAPIO_SUBMITTED;
  2042. } else {
  2043. pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
  2044. inc_miss_counter(cache, bio);
  2045. remap_to_origin_clear_discard(cache, bio, block);
  2046. cell_defer(cache, cell, false);
  2047. }
  2048. } else {
  2049. inc_hit_counter(cache, bio);
  2050. if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
  2051. !is_dirty(cache, lookup_result.cblock))
  2052. remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
  2053. else
  2054. remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
  2055. cell_defer(cache, cell, false);
  2056. }
  2057. break;
  2058. case POLICY_MISS:
  2059. inc_miss_counter(cache, bio);
  2060. pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
  2061. if (pb->req_nr != 0) {
  2062. /*
  2063. * This is a duplicate writethrough io that is no
  2064. * longer needed because the block has been demoted.
  2065. */
  2066. bio_endio(bio, 0);
  2067. cell_defer(cache, cell, false);
  2068. return DM_MAPIO_SUBMITTED;
  2069. } else {
  2070. remap_to_origin_clear_discard(cache, bio, block);
  2071. cell_defer(cache, cell, false);
  2072. }
  2073. break;
  2074. default:
  2075. DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
  2076. (unsigned) lookup_result.op);
  2077. bio_io_error(bio);
  2078. r = DM_MAPIO_SUBMITTED;
  2079. }
  2080. return r;
  2081. }
  2082. static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
  2083. {
  2084. struct cache *cache = ti->private;
  2085. unsigned long flags;
  2086. size_t pb_data_size = get_per_bio_data_size(cache);
  2087. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  2088. if (pb->tick) {
  2089. policy_tick(cache->policy);
  2090. spin_lock_irqsave(&cache->lock, flags);
  2091. cache->need_tick_bio = true;
  2092. spin_unlock_irqrestore(&cache->lock, flags);
  2093. }
  2094. check_for_quiesced_migrations(cache, pb);
  2095. return 0;
  2096. }
  2097. static int write_dirty_bitset(struct cache *cache)
  2098. {
  2099. unsigned i, r;
  2100. for (i = 0; i < from_cblock(cache->cache_size); i++) {
  2101. r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
  2102. is_dirty(cache, to_cblock(i)));
  2103. if (r)
  2104. return r;
  2105. }
  2106. return 0;
  2107. }
  2108. static int write_discard_bitset(struct cache *cache)
  2109. {
  2110. unsigned i, r;
  2111. r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
  2112. cache->discard_nr_blocks);
  2113. if (r) {
  2114. DMERR("could not resize on-disk discard bitset");
  2115. return r;
  2116. }
  2117. for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
  2118. r = dm_cache_set_discard(cache->cmd, to_dblock(i),
  2119. is_discarded(cache, to_dblock(i)));
  2120. if (r)
  2121. return r;
  2122. }
  2123. return 0;
  2124. }
  2125. static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
  2126. uint32_t hint)
  2127. {
  2128. struct cache *cache = context;
  2129. return dm_cache_save_hint(cache->cmd, cblock, hint);
  2130. }
  2131. static int write_hints(struct cache *cache)
  2132. {
  2133. int r;
  2134. r = dm_cache_begin_hints(cache->cmd, cache->policy);
  2135. if (r) {
  2136. DMERR("dm_cache_begin_hints failed");
  2137. return r;
  2138. }
  2139. r = policy_walk_mappings(cache->policy, save_hint, cache);
  2140. if (r)
  2141. DMERR("policy_walk_mappings failed");
  2142. return r;
  2143. }
  2144. /*
  2145. * returns true on success
  2146. */
  2147. static bool sync_metadata(struct cache *cache)
  2148. {
  2149. int r1, r2, r3, r4;
  2150. r1 = write_dirty_bitset(cache);
  2151. if (r1)
  2152. DMERR("could not write dirty bitset");
  2153. r2 = write_discard_bitset(cache);
  2154. if (r2)
  2155. DMERR("could not write discard bitset");
  2156. save_stats(cache);
  2157. r3 = write_hints(cache);
  2158. if (r3)
  2159. DMERR("could not write hints");
  2160. /*
  2161. * If writing the above metadata failed, we still commit, but don't
  2162. * set the clean shutdown flag. This will effectively force every
  2163. * dirty bit to be set on reload.
  2164. */
  2165. r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
  2166. if (r4)
  2167. DMERR("could not write cache metadata. Data loss may occur.");
  2168. return !r1 && !r2 && !r3 && !r4;
  2169. }
  2170. static void cache_postsuspend(struct dm_target *ti)
  2171. {
  2172. struct cache *cache = ti->private;
  2173. start_quiescing(cache);
  2174. wait_for_migrations(cache);
  2175. stop_worker(cache);
  2176. requeue_deferred_io(cache);
  2177. stop_quiescing(cache);
  2178. (void) sync_metadata(cache);
  2179. }
  2180. static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
  2181. bool dirty, uint32_t hint, bool hint_valid)
  2182. {
  2183. int r;
  2184. struct cache *cache = context;
  2185. r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
  2186. if (r)
  2187. return r;
  2188. if (dirty)
  2189. set_dirty(cache, oblock, cblock);
  2190. else
  2191. clear_dirty(cache, oblock, cblock);
  2192. return 0;
  2193. }
  2194. static int load_discard(void *context, sector_t discard_block_size,
  2195. dm_dblock_t dblock, bool discard)
  2196. {
  2197. struct cache *cache = context;
  2198. /* FIXME: handle mis-matched block size */
  2199. if (discard)
  2200. set_discard(cache, dblock);
  2201. else
  2202. clear_discard(cache, dblock);
  2203. return 0;
  2204. }
  2205. static dm_cblock_t get_cache_dev_size(struct cache *cache)
  2206. {
  2207. sector_t size = get_dev_size(cache->cache_dev);
  2208. (void) sector_div(size, cache->sectors_per_block);
  2209. return to_cblock(size);
  2210. }
  2211. static bool can_resize(struct cache *cache, dm_cblock_t new_size)
  2212. {
  2213. if (from_cblock(new_size) > from_cblock(cache->cache_size))
  2214. return true;
  2215. /*
  2216. * We can't drop a dirty block when shrinking the cache.
  2217. */
  2218. while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
  2219. new_size = to_cblock(from_cblock(new_size) + 1);
  2220. if (is_dirty(cache, new_size)) {
  2221. DMERR("unable to shrink cache; cache block %llu is dirty",
  2222. (unsigned long long) from_cblock(new_size));
  2223. return false;
  2224. }
  2225. }
  2226. return true;
  2227. }
  2228. static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
  2229. {
  2230. int r;
  2231. r = dm_cache_resize(cache->cmd, cache->cache_size);
  2232. if (r) {
  2233. DMERR("could not resize cache metadata");
  2234. return r;
  2235. }
  2236. cache->cache_size = new_size;
  2237. return 0;
  2238. }
  2239. static int cache_preresume(struct dm_target *ti)
  2240. {
  2241. int r = 0;
  2242. struct cache *cache = ti->private;
  2243. dm_cblock_t csize = get_cache_dev_size(cache);
  2244. /*
  2245. * Check to see if the cache has resized.
  2246. */
  2247. if (!cache->sized) {
  2248. r = resize_cache_dev(cache, csize);
  2249. if (r)
  2250. return r;
  2251. cache->sized = true;
  2252. } else if (csize != cache->cache_size) {
  2253. if (!can_resize(cache, csize))
  2254. return -EINVAL;
  2255. r = resize_cache_dev(cache, csize);
  2256. if (r)
  2257. return r;
  2258. }
  2259. if (!cache->loaded_mappings) {
  2260. r = dm_cache_load_mappings(cache->cmd, cache->policy,
  2261. load_mapping, cache);
  2262. if (r) {
  2263. DMERR("could not load cache mappings");
  2264. return r;
  2265. }
  2266. cache->loaded_mappings = true;
  2267. }
  2268. if (!cache->loaded_discards) {
  2269. r = dm_cache_load_discards(cache->cmd, load_discard, cache);
  2270. if (r) {
  2271. DMERR("could not load origin discards");
  2272. return r;
  2273. }
  2274. cache->loaded_discards = true;
  2275. }
  2276. return r;
  2277. }
  2278. static void cache_resume(struct dm_target *ti)
  2279. {
  2280. struct cache *cache = ti->private;
  2281. cache->need_tick_bio = true;
  2282. do_waker(&cache->waker.work);
  2283. }
  2284. /*
  2285. * Status format:
  2286. *
  2287. * <#used metadata blocks>/<#total metadata blocks>
  2288. * <#read hits> <#read misses> <#write hits> <#write misses>
  2289. * <#demotions> <#promotions> <#blocks in cache> <#dirty>
  2290. * <#features> <features>*
  2291. * <#core args> <core args>
  2292. * <#policy args> <policy args>*
  2293. */
  2294. static void cache_status(struct dm_target *ti, status_type_t type,
  2295. unsigned status_flags, char *result, unsigned maxlen)
  2296. {
  2297. int r = 0;
  2298. unsigned i;
  2299. ssize_t sz = 0;
  2300. dm_block_t nr_free_blocks_metadata = 0;
  2301. dm_block_t nr_blocks_metadata = 0;
  2302. char buf[BDEVNAME_SIZE];
  2303. struct cache *cache = ti->private;
  2304. dm_cblock_t residency;
  2305. switch (type) {
  2306. case STATUSTYPE_INFO:
  2307. /* Commit to ensure statistics aren't out-of-date */
  2308. if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
  2309. r = dm_cache_commit(cache->cmd, false);
  2310. if (r)
  2311. DMERR("could not commit metadata for accurate status");
  2312. }
  2313. r = dm_cache_get_free_metadata_block_count(cache->cmd,
  2314. &nr_free_blocks_metadata);
  2315. if (r) {
  2316. DMERR("could not get metadata free block count");
  2317. goto err;
  2318. }
  2319. r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
  2320. if (r) {
  2321. DMERR("could not get metadata device size");
  2322. goto err;
  2323. }
  2324. residency = policy_residency(cache->policy);
  2325. DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
  2326. (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
  2327. (unsigned long long)nr_blocks_metadata,
  2328. (unsigned) atomic_read(&cache->stats.read_hit),
  2329. (unsigned) atomic_read(&cache->stats.read_miss),
  2330. (unsigned) atomic_read(&cache->stats.write_hit),
  2331. (unsigned) atomic_read(&cache->stats.write_miss),
  2332. (unsigned) atomic_read(&cache->stats.demotion),
  2333. (unsigned) atomic_read(&cache->stats.promotion),
  2334. (unsigned long long) from_cblock(residency),
  2335. cache->nr_dirty);
  2336. if (writethrough_mode(&cache->features))
  2337. DMEMIT("1 writethrough ");
  2338. else if (passthrough_mode(&cache->features))
  2339. DMEMIT("1 passthrough ");
  2340. else if (writeback_mode(&cache->features))
  2341. DMEMIT("1 writeback ");
  2342. else {
  2343. DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode);
  2344. goto err;
  2345. }
  2346. DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
  2347. if (sz < maxlen) {
  2348. r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
  2349. if (r)
  2350. DMERR("policy_emit_config_values returned %d", r);
  2351. }
  2352. break;
  2353. case STATUSTYPE_TABLE:
  2354. format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
  2355. DMEMIT("%s ", buf);
  2356. format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
  2357. DMEMIT("%s ", buf);
  2358. format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
  2359. DMEMIT("%s", buf);
  2360. for (i = 0; i < cache->nr_ctr_args - 1; i++)
  2361. DMEMIT(" %s", cache->ctr_args[i]);
  2362. if (cache->nr_ctr_args)
  2363. DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
  2364. }
  2365. return;
  2366. err:
  2367. DMEMIT("Error");
  2368. }
  2369. /*
  2370. * A cache block range can take two forms:
  2371. *
  2372. * i) A single cblock, eg. '3456'
  2373. * ii) A begin and end cblock with dots between, eg. 123-234
  2374. */
  2375. static int parse_cblock_range(struct cache *cache, const char *str,
  2376. struct cblock_range *result)
  2377. {
  2378. char dummy;
  2379. uint64_t b, e;
  2380. int r;
  2381. /*
  2382. * Try and parse form (ii) first.
  2383. */
  2384. r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
  2385. if (r < 0)
  2386. return r;
  2387. if (r == 2) {
  2388. result->begin = to_cblock(b);
  2389. result->end = to_cblock(e);
  2390. return 0;
  2391. }
  2392. /*
  2393. * That didn't work, try form (i).
  2394. */
  2395. r = sscanf(str, "%llu%c", &b, &dummy);
  2396. if (r < 0)
  2397. return r;
  2398. if (r == 1) {
  2399. result->begin = to_cblock(b);
  2400. result->end = to_cblock(from_cblock(result->begin) + 1u);
  2401. return 0;
  2402. }
  2403. DMERR("invalid cblock range '%s'", str);
  2404. return -EINVAL;
  2405. }
  2406. static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
  2407. {
  2408. uint64_t b = from_cblock(range->begin);
  2409. uint64_t e = from_cblock(range->end);
  2410. uint64_t n = from_cblock(cache->cache_size);
  2411. if (b >= n) {
  2412. DMERR("begin cblock out of range: %llu >= %llu", b, n);
  2413. return -EINVAL;
  2414. }
  2415. if (e > n) {
  2416. DMERR("end cblock out of range: %llu > %llu", e, n);
  2417. return -EINVAL;
  2418. }
  2419. if (b >= e) {
  2420. DMERR("invalid cblock range: %llu >= %llu", b, e);
  2421. return -EINVAL;
  2422. }
  2423. return 0;
  2424. }
  2425. static int request_invalidation(struct cache *cache, struct cblock_range *range)
  2426. {
  2427. struct invalidation_request req;
  2428. INIT_LIST_HEAD(&req.list);
  2429. req.cblocks = range;
  2430. atomic_set(&req.complete, 0);
  2431. req.err = 0;
  2432. init_waitqueue_head(&req.result_wait);
  2433. spin_lock(&cache->invalidation_lock);
  2434. list_add(&req.list, &cache->invalidation_requests);
  2435. spin_unlock(&cache->invalidation_lock);
  2436. wake_worker(cache);
  2437. wait_event(req.result_wait, atomic_read(&req.complete));
  2438. return req.err;
  2439. }
  2440. static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
  2441. const char **cblock_ranges)
  2442. {
  2443. int r = 0;
  2444. unsigned i;
  2445. struct cblock_range range;
  2446. if (!passthrough_mode(&cache->features)) {
  2447. DMERR("cache has to be in passthrough mode for invalidation");
  2448. return -EPERM;
  2449. }
  2450. for (i = 0; i < count; i++) {
  2451. r = parse_cblock_range(cache, cblock_ranges[i], &range);
  2452. if (r)
  2453. break;
  2454. r = validate_cblock_range(cache, &range);
  2455. if (r)
  2456. break;
  2457. /*
  2458. * Pass begin and end origin blocks to the worker and wake it.
  2459. */
  2460. r = request_invalidation(cache, &range);
  2461. if (r)
  2462. break;
  2463. }
  2464. return r;
  2465. }
  2466. /*
  2467. * Supports
  2468. * "<key> <value>"
  2469. * and
  2470. * "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*
  2471. *
  2472. * The key migration_threshold is supported by the cache target core.
  2473. */
  2474. static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
  2475. {
  2476. struct cache *cache = ti->private;
  2477. if (!argc)
  2478. return -EINVAL;
  2479. if (!strcasecmp(argv[0], "invalidate_cblocks"))
  2480. return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
  2481. if (argc != 2)
  2482. return -EINVAL;
  2483. return set_config_value(cache, argv[0], argv[1]);
  2484. }
  2485. static int cache_iterate_devices(struct dm_target *ti,
  2486. iterate_devices_callout_fn fn, void *data)
  2487. {
  2488. int r = 0;
  2489. struct cache *cache = ti->private;
  2490. r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
  2491. if (!r)
  2492. r = fn(ti, cache->origin_dev, 0, ti->len, data);
  2493. return r;
  2494. }
  2495. /*
  2496. * We assume I/O is going to the origin (which is the volume
  2497. * more likely to have restrictions e.g. by being striped).
  2498. * (Looking up the exact location of the data would be expensive
  2499. * and could always be out of date by the time the bio is submitted.)
  2500. */
  2501. static int cache_bvec_merge(struct dm_target *ti,
  2502. struct bvec_merge_data *bvm,
  2503. struct bio_vec *biovec, int max_size)
  2504. {
  2505. struct cache *cache = ti->private;
  2506. struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
  2507. if (!q->merge_bvec_fn)
  2508. return max_size;
  2509. bvm->bi_bdev = cache->origin_dev->bdev;
  2510. return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
  2511. }
  2512. static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
  2513. {
  2514. /*
  2515. * FIXME: these limits may be incompatible with the cache device
  2516. */
  2517. limits->max_discard_sectors = cache->discard_block_size * 1024;
  2518. limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
  2519. }
  2520. static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
  2521. {
  2522. struct cache *cache = ti->private;
  2523. uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
  2524. /*
  2525. * If the system-determined stacked limits are compatible with the
  2526. * cache's blocksize (io_opt is a factor) do not override them.
  2527. */
  2528. if (io_opt_sectors < cache->sectors_per_block ||
  2529. do_div(io_opt_sectors, cache->sectors_per_block)) {
  2530. blk_limits_io_min(limits, 0);
  2531. blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
  2532. }
  2533. set_discard_limits(cache, limits);
  2534. }
  2535. /*----------------------------------------------------------------*/
  2536. static struct target_type cache_target = {
  2537. .name = "cache",
  2538. .version = {1, 2, 0},
  2539. .module = THIS_MODULE,
  2540. .ctr = cache_ctr,
  2541. .dtr = cache_dtr,
  2542. .map = cache_map,
  2543. .end_io = cache_end_io,
  2544. .postsuspend = cache_postsuspend,
  2545. .preresume = cache_preresume,
  2546. .resume = cache_resume,
  2547. .status = cache_status,
  2548. .message = cache_message,
  2549. .iterate_devices = cache_iterate_devices,
  2550. .merge = cache_bvec_merge,
  2551. .io_hints = cache_io_hints,
  2552. };
  2553. static int __init dm_cache_init(void)
  2554. {
  2555. int r;
  2556. r = dm_register_target(&cache_target);
  2557. if (r) {
  2558. DMERR("cache target registration failed: %d", r);
  2559. return r;
  2560. }
  2561. migration_cache = KMEM_CACHE(dm_cache_migration, 0);
  2562. if (!migration_cache) {
  2563. dm_unregister_target(&cache_target);
  2564. return -ENOMEM;
  2565. }
  2566. return 0;
  2567. }
  2568. static void __exit dm_cache_exit(void)
  2569. {
  2570. dm_unregister_target(&cache_target);
  2571. kmem_cache_destroy(migration_cache);
  2572. }
  2573. module_init(dm_cache_init);
  2574. module_exit(dm_cache_exit);
  2575. MODULE_DESCRIPTION(DM_NAME " cache target");
  2576. MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
  2577. MODULE_LICENSE("GPL");