dm-cache-target.c

  1. /*
  2. * Copyright (C) 2012 Red Hat. All rights reserved.
  3. *
  4. * This file is released under the GPL.
  5. */
  6. #include "dm.h"
  7. #include "dm-bio-prison.h"
  8. #include "dm-bio-record.h"
  9. #include "dm-cache-metadata.h"
  10. #include <linux/dm-io.h>
  11. #include <linux/dm-kcopyd.h>
  12. #include <linux/init.h>
  13. #include <linux/mempool.h>
  14. #include <linux/module.h>
  15. #include <linux/slab.h>
  16. #include <linux/vmalloc.h>
  17. #define DM_MSG_PREFIX "cache"
  18. DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
  19. "A percentage of time allocated for copying to and/or from cache");
  20. /*----------------------------------------------------------------*/
  21. /*
  22. * Glossary:
  23. *
  24. * oblock: index of an origin block
  25. * cblock: index of a cache block
  26. * promotion: movement of a block from origin to cache
  27. * demotion: movement of a block from cache to origin
  28. * migration: movement of a block between the origin and cache device,
  29. * either direction
  30. */
  31. /*----------------------------------------------------------------*/
  32. static size_t bitset_size_in_bytes(unsigned nr_entries)
  33. {
  34. return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
  35. }
  36. static unsigned long *alloc_bitset(unsigned nr_entries)
  37. {
  38. size_t s = bitset_size_in_bytes(nr_entries);
  39. return vzalloc(s);
  40. }
  41. static void clear_bitset(void *bitset, unsigned nr_entries)
  42. {
  43. size_t s = bitset_size_in_bytes(nr_entries);
  44. memset(bitset, 0, s);
  45. }
  46. static void free_bitset(unsigned long *bits)
  47. {
  48. vfree(bits);
  49. }
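/*
 * Note on the bitset helpers above: sizes are rounded up to whole unsigned
 * longs (dm_div_up(nr_entries, BITS_PER_LONG)) and allocation uses
 * vzalloc(), so a fresh bitset is already zeroed; clear_bitset() re-zeroes
 * one in place. For example, with 64-bit longs a bitset for 1000 entries
 * occupies 16 longs (128 bytes).
 */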
  50. /*----------------------------------------------------------------*/
  51. /*
  52. * There are a couple of places where we let a bio run, but want to do some
  53. * work before calling its endio function. We do this by temporarily
  54. * changing the endio fn.
  55. */
  56. struct dm_hook_info {
  57. bio_end_io_t *bi_end_io;
  58. void *bi_private;
  59. };
  60. static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
  61. bio_end_io_t *bi_end_io, void *bi_private)
  62. {
  63. h->bi_end_io = bio->bi_end_io;
  64. h->bi_private = bio->bi_private;
  65. bio->bi_end_io = bi_end_io;
  66. bio->bi_private = bi_private;
  67. }
  68. static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
  69. {
  70. bio->bi_end_io = h->bi_end_io;
  71. bio->bi_private = h->bi_private;
  72. /*
  73. * Must bump bi_remaining to allow bio to complete with
  74. * restored bi_end_io.
  75. */
  76. atomic_inc(&bio->bi_remaining);
  77. }
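/*
 * Typical use of the pair above: dm_hook_bio() is called before the bio is
 * submitted; when the I/O completes the temporary endio runs, calls
 * dm_unhook_bio() to restore the original endio/private, and then either
 * finishes the bio or hands it off for further processing (see
 * writethrough_endio() and overwrite_endio() below).
 */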
  78. /*----------------------------------------------------------------*/
  79. #define MIGRATION_POOL_SIZE 128
  80. #define COMMIT_PERIOD HZ
  81. #define MIGRATION_COUNT_WINDOW 10
  82. /*
  83. * The block size of the device holding cache data must be
  84. * between 32KB and 1GB.
  85. */
  86. #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
  87. #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
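/*
 * With 512-byte sectors (SECTOR_SHIFT == 9) these limits work out to
 * 64 sectors minimum and 2097152 sectors (1 GiB) maximum per cache block.
 */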
  88. /*
  89. * FIXME: the cache is read/write for the time being.
  90. */
  91. enum cache_metadata_mode {
  92. CM_WRITE, /* metadata may be changed */
  93. CM_READ_ONLY, /* metadata may not be changed */
  94. };
  95. enum cache_io_mode {
  96. /*
  97. * Data is written to cached blocks only. These blocks are marked
  98. * dirty. If you lose the cache device you will lose data.
  99. * Potential performance increase for both reads and writes.
  100. */
  101. CM_IO_WRITEBACK,
  102. /*
  103. * Data is written to both cache and origin. Blocks are never
  104. * dirty. Potential performance benefit for reads only.
  105. */
  106. CM_IO_WRITETHROUGH,
  107. /*
  108. * A degraded mode useful for various cache coherency situations
  109. * (eg, rolling back snapshots). Reads and writes always go to the
  110. * origin. If a write goes to a cached oblock, then the cache
  111. * block is invalidated.
  112. */
  113. CM_IO_PASSTHROUGH
  114. };
  115. struct cache_features {
  116. enum cache_metadata_mode mode;
  117. enum cache_io_mode io_mode;
  118. };
  119. struct cache_stats {
  120. atomic_t read_hit;
  121. atomic_t read_miss;
  122. atomic_t write_hit;
  123. atomic_t write_miss;
  124. atomic_t demotion;
  125. atomic_t promotion;
  126. atomic_t copies_avoided;
  127. atomic_t cache_cell_clash;
  128. atomic_t commit_count;
  129. atomic_t discard_count;
  130. };
  131. /*
  132. * Defines a range of cblocks: begin to (end - 1) are in the range, and end
  133. * is the one-past-the-end value.
  134. */
  135. struct cblock_range {
  136. dm_cblock_t begin;
  137. dm_cblock_t end;
  138. };
  139. struct invalidation_request {
  140. struct list_head list;
  141. struct cblock_range *cblocks;
  142. atomic_t complete;
  143. int err;
  144. wait_queue_head_t result_wait;
  145. };
  146. struct cache {
  147. struct dm_target *ti;
  148. struct dm_target_callbacks callbacks;
  149. struct dm_cache_metadata *cmd;
  150. /*
  151. * Metadata is written to this device.
  152. */
  153. struct dm_dev *metadata_dev;
  154. /*
  155. * The slower of the two data devices. Typically a spindle.
  156. */
  157. struct dm_dev *origin_dev;
  158. /*
  159. * The faster of the two data devices. Typically an SSD.
  160. */
  161. struct dm_dev *cache_dev;
  162. /*
  163. * Size of the origin device in _complete_ blocks and native sectors.
  164. */
  165. dm_oblock_t origin_blocks;
  166. sector_t origin_sectors;
  167. /*
  168. * Size of the cache device in blocks.
  169. */
  170. dm_cblock_t cache_size;
  171. /*
  172. * Fields for converting from sectors to blocks.
  173. */
  174. uint32_t sectors_per_block;
  175. int sectors_per_block_shift;
  176. spinlock_t lock;
  177. struct bio_list deferred_bios;
  178. struct bio_list deferred_flush_bios;
  179. struct bio_list deferred_writethrough_bios;
  180. struct list_head quiesced_migrations;
  181. struct list_head completed_migrations;
  182. struct list_head need_commit_migrations;
  183. sector_t migration_threshold;
  184. wait_queue_head_t migration_wait;
  185. atomic_t nr_allocated_migrations;
  186. /*
  187. * The number of in flight migrations that are performing
  188. * background io. eg, promotion, writeback.
  189. */
  190. atomic_t nr_io_migrations;
  191. wait_queue_head_t quiescing_wait;
  192. atomic_t quiescing;
  193. atomic_t quiescing_ack;
  194. /*
  195. * cache_size entries, dirty if set
  196. */
  197. atomic_t nr_dirty;
  198. unsigned long *dirty_bitset;
  199. /*
  200. * origin_blocks entries, discarded if set.
  201. */
  202. dm_dblock_t discard_nr_blocks;
  203. unsigned long *discard_bitset;
  204. uint32_t discard_block_size; /* a power of 2 times sectors per block */
  205. /*
  206. * Rather than reconstructing the table line for the status we just
  207. * save it and regurgitate.
  208. */
  209. unsigned nr_ctr_args;
  210. const char **ctr_args;
  211. struct dm_kcopyd_client *copier;
  212. struct workqueue_struct *wq;
  213. struct work_struct worker;
  214. struct delayed_work waker;
  215. unsigned long last_commit_jiffies;
  216. struct dm_bio_prison *prison;
  217. struct dm_deferred_set *all_io_ds;
  218. mempool_t *migration_pool;
  219. struct dm_cache_policy *policy;
  220. unsigned policy_nr_args;
  221. bool need_tick_bio:1;
  222. bool sized:1;
  223. bool invalidate:1;
  224. bool commit_requested:1;
  225. bool loaded_mappings:1;
  226. bool loaded_discards:1;
  227. /*
  228. * Cache features such as write-through.
  229. */
  230. struct cache_features features;
  231. struct cache_stats stats;
  232. /*
  233. * Invalidation fields.
  234. */
  235. spinlock_t invalidation_lock;
  236. struct list_head invalidation_requests;
  237. };
  238. struct per_bio_data {
  239. bool tick:1;
  240. unsigned req_nr:2;
  241. struct dm_deferred_entry *all_io_entry;
  242. struct dm_hook_info hook_info;
  243. /*
  244. * writethrough fields. These MUST remain at the end of this
  245. * structure and the 'cache' member must be the first as it
  246. * is used to determine the offset of the writethrough fields.
  247. */
  248. struct cache *cache;
  249. dm_cblock_t cblock;
  250. struct dm_bio_details bio_details;
  251. };
  252. struct dm_cache_migration {
  253. struct list_head list;
  254. struct cache *cache;
  255. unsigned long start_jiffies;
  256. dm_oblock_t old_oblock;
  257. dm_oblock_t new_oblock;
  258. dm_cblock_t cblock;
  259. bool err:1;
  260. bool discard:1;
  261. bool writeback:1;
  262. bool demote:1;
  263. bool promote:1;
  264. bool requeue_holder:1;
  265. bool invalidate:1;
  266. struct dm_bio_prison_cell *old_ocell;
  267. struct dm_bio_prison_cell *new_ocell;
  268. };
  269. /*
  270. * Processing a bio in the worker thread may require these memory
  271. * allocations. We prealloc to avoid deadlocks (the same worker thread
  272. * frees them back to the mempool).
  273. */
  274. struct prealloc {
  275. struct dm_cache_migration *mg;
  276. struct dm_bio_prison_cell *cell1;
  277. struct dm_bio_prison_cell *cell2;
  278. };
  279. static void wake_worker(struct cache *cache)
  280. {
  281. queue_work(cache->wq, &cache->worker);
  282. }
  283. /*----------------------------------------------------------------*/
  284. static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
  285. {
  286. /* FIXME: change to use a local slab. */
  287. return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
  288. }
  289. static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
  290. {
  291. dm_bio_prison_free_cell(cache->prison, cell);
  292. }
  293. static struct dm_cache_migration *alloc_migration(struct cache *cache)
  294. {
  295. struct dm_cache_migration *mg;
  296. mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
  297. if (mg) {
  298. mg->cache = cache;
  299. atomic_inc(&mg->cache->nr_allocated_migrations);
  300. }
  301. return mg;
  302. }
  303. static void free_migration(struct dm_cache_migration *mg)
  304. {
  305. if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
  306. wake_up(&mg->cache->migration_wait);
  307. mempool_free(mg, mg->cache->migration_pool);
  308. }
  309. static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
  310. {
  311. if (!p->mg) {
  312. p->mg = alloc_migration(cache);
  313. if (!p->mg)
  314. return -ENOMEM;
  315. }
  316. if (!p->cell1) {
  317. p->cell1 = alloc_prison_cell(cache);
  318. if (!p->cell1)
  319. return -ENOMEM;
  320. }
  321. if (!p->cell2) {
  322. p->cell2 = alloc_prison_cell(cache);
  323. if (!p->cell2)
  324. return -ENOMEM;
  325. }
  326. return 0;
  327. }
  328. static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
  329. {
  330. if (p->cell2)
  331. free_prison_cell(cache, p->cell2);
  332. if (p->cell1)
  333. free_prison_cell(cache, p->cell1);
  334. if (p->mg)
  335. free_migration(p->mg);
  336. }
  337. static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
  338. {
  339. struct dm_cache_migration *mg = p->mg;
  340. BUG_ON(!mg);
  341. p->mg = NULL;
  342. return mg;
  343. }
  344. /*
  345. * You must have a cell within the prealloc struct to return. If not, this
  346. * function will BUG() rather than returning NULL.
  347. */
  348. static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
  349. {
  350. struct dm_bio_prison_cell *r = NULL;
  351. if (p->cell1) {
  352. r = p->cell1;
  353. p->cell1 = NULL;
  354. } else if (p->cell2) {
  355. r = p->cell2;
  356. p->cell2 = NULL;
  357. } else
  358. BUG();
  359. return r;
  360. }
  361. /*
  362. * You can't have more than two cells in a prealloc struct. BUG() will be
  363. * called if you try to overfill it.
  364. */
  365. static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
  366. {
  367. if (!p->cell2)
  368. p->cell2 = cell;
  369. else if (!p->cell1)
  370. p->cell1 = cell;
  371. else
  372. BUG();
  373. }
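/*
 * The helpers above treat cell1/cell2 as a tiny stack of at most two prison
 * cells: prealloc_data_structs() tops the struct up before a bio is
 * processed, prealloc_get_cell()/prealloc_put_cell() hand cells out and
 * back, and prealloc_free_structs() releases whatever is left over.
 */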
  374. /*----------------------------------------------------------------*/
  375. static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key *key)
  376. {
  377. key->virtual = 0;
  378. key->dev = 0;
  379. key->block_begin = from_oblock(begin);
  380. key->block_end = from_oblock(end);
  381. }
  382. /*
  383. * The caller hands in a preallocated cell, and a free function for it.
  384. * The cell will be freed if there's an error, or if it wasn't used because
  385. * a cell with that key already exists.
  386. */
  387. typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
  388. static int bio_detain_range(struct cache *cache, dm_oblock_t oblock_begin, dm_oblock_t oblock_end,
  389. struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
  390. cell_free_fn free_fn, void *free_context,
  391. struct dm_bio_prison_cell **cell_result)
  392. {
  393. int r;
  394. struct dm_cell_key key;
  395. build_key(oblock_begin, oblock_end, &key);
  396. r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
  397. if (r)
  398. free_fn(free_context, cell_prealloc);
  399. return r;
  400. }
  401. static int bio_detain(struct cache *cache, dm_oblock_t oblock,
  402. struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
  403. cell_free_fn free_fn, void *free_context,
  404. struct dm_bio_prison_cell **cell_result)
  405. {
  406. dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
  407. return bio_detain_range(cache, oblock, end, bio,
  408. cell_prealloc, free_fn, free_context, cell_result);
  409. }
  410. static int get_cell(struct cache *cache,
  411. dm_oblock_t oblock,
  412. struct prealloc *structs,
  413. struct dm_bio_prison_cell **cell_result)
  414. {
  415. int r;
  416. struct dm_cell_key key;
  417. struct dm_bio_prison_cell *cell_prealloc;
  418. cell_prealloc = prealloc_get_cell(structs);
  419. build_key(oblock, to_oblock(from_oblock(oblock) + 1ULL), &key);
  420. r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
  421. if (r)
  422. prealloc_put_cell(structs, cell_prealloc);
  423. return r;
  424. }
  425. /*----------------------------------------------------------------*/
  426. static bool is_dirty(struct cache *cache, dm_cblock_t b)
  427. {
  428. return test_bit(from_cblock(b), cache->dirty_bitset);
  429. }
  430. static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
  431. {
  432. if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
  433. atomic_inc(&cache->nr_dirty);
  434. policy_set_dirty(cache->policy, oblock);
  435. }
  436. }
  437. static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
  438. {
  439. if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
  440. policy_clear_dirty(cache->policy, oblock);
  441. if (atomic_dec_return(&cache->nr_dirty) == 0)
  442. dm_table_event(cache->ti->table);
  443. }
  444. }
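/*
 * The dirty bitset and the nr_dirty counter are kept in step by the
 * test_and_{set,clear}_bit() calls above; a device-mapper event is raised
 * when the last dirty block is cleaned, so userspace can tell the cache has
 * become fully clean.
 */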
  445. /*----------------------------------------------------------------*/
  446. static bool block_size_is_power_of_two(struct cache *cache)
  447. {
  448. return cache->sectors_per_block_shift >= 0;
  449. }
  450. /* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
  451. #if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
  452. __always_inline
  453. #endif
  454. static dm_block_t block_div(dm_block_t b, uint32_t n)
  455. {
  456. do_div(b, n);
  457. return b;
  458. }
  459. static dm_block_t oblocks_per_dblock(struct cache *cache)
  460. {
  461. dm_block_t oblocks = cache->discard_block_size;
  462. if (block_size_is_power_of_two(cache))
  463. oblocks >>= cache->sectors_per_block_shift;
  464. else
  465. oblocks = block_div(oblocks, cache->sectors_per_block);
  466. return oblocks;
  467. }
  468. static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
  469. {
  470. return to_dblock(block_div(from_oblock(oblock),
  471. oblocks_per_dblock(cache)));
  472. }
  473. static dm_oblock_t dblock_to_oblock(struct cache *cache, dm_dblock_t dblock)
  474. {
  475. return to_oblock(from_dblock(dblock) * oblocks_per_dblock(cache));
  476. }
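/*
 * discard_block_size is expressed in sectors, so oblocks_per_dblock()
 * simply divides it by the cache block size. Illustrative numbers: with
 * 128-sector cache blocks and a 1024-sector discard block, each discard bit
 * covers 8 origin blocks.
 */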
  477. static void set_discard(struct cache *cache, dm_dblock_t b)
  478. {
  479. unsigned long flags;
  480. BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
  481. atomic_inc(&cache->stats.discard_count);
  482. spin_lock_irqsave(&cache->lock, flags);
  483. set_bit(from_dblock(b), cache->discard_bitset);
  484. spin_unlock_irqrestore(&cache->lock, flags);
  485. }
  486. static void clear_discard(struct cache *cache, dm_dblock_t b)
  487. {
  488. unsigned long flags;
  489. spin_lock_irqsave(&cache->lock, flags);
  490. clear_bit(from_dblock(b), cache->discard_bitset);
  491. spin_unlock_irqrestore(&cache->lock, flags);
  492. }
  493. static bool is_discarded(struct cache *cache, dm_dblock_t b)
  494. {
  495. int r;
  496. unsigned long flags;
  497. spin_lock_irqsave(&cache->lock, flags);
  498. r = test_bit(from_dblock(b), cache->discard_bitset);
  499. spin_unlock_irqrestore(&cache->lock, flags);
  500. return r;
  501. }
  502. static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
  503. {
  504. int r;
  505. unsigned long flags;
  506. spin_lock_irqsave(&cache->lock, flags);
  507. r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
  508. cache->discard_bitset);
  509. spin_unlock_irqrestore(&cache->lock, flags);
  510. return r;
  511. }
  512. /*----------------------------------------------------------------*/
  513. static void load_stats(struct cache *cache)
  514. {
  515. struct dm_cache_statistics stats;
  516. dm_cache_metadata_get_stats(cache->cmd, &stats);
  517. atomic_set(&cache->stats.read_hit, stats.read_hits);
  518. atomic_set(&cache->stats.read_miss, stats.read_misses);
  519. atomic_set(&cache->stats.write_hit, stats.write_hits);
  520. atomic_set(&cache->stats.write_miss, stats.write_misses);
  521. }
  522. static void save_stats(struct cache *cache)
  523. {
  524. struct dm_cache_statistics stats;
  525. stats.read_hits = atomic_read(&cache->stats.read_hit);
  526. stats.read_misses = atomic_read(&cache->stats.read_miss);
  527. stats.write_hits = atomic_read(&cache->stats.write_hit);
  528. stats.write_misses = atomic_read(&cache->stats.write_miss);
  529. dm_cache_metadata_set_stats(cache->cmd, &stats);
  530. }
  531. /*----------------------------------------------------------------
  532. * Per bio data
  533. *--------------------------------------------------------------*/
  534. /*
  535. * If using writeback, leave out struct per_bio_data's writethrough fields.
  536. */
  537. #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
  538. #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
  539. static bool writethrough_mode(struct cache_features *f)
  540. {
  541. return f->io_mode == CM_IO_WRITETHROUGH;
  542. }
  543. static bool writeback_mode(struct cache_features *f)
  544. {
  545. return f->io_mode == CM_IO_WRITEBACK;
  546. }
  547. static bool passthrough_mode(struct cache_features *f)
  548. {
  549. return f->io_mode == CM_IO_PASSTHROUGH;
  550. }
  551. static size_t get_per_bio_data_size(struct cache *cache)
  552. {
  553. return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
  554. }
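/*
 * Only writethrough mode needs the cache/cblock/bio_details members of
 * struct per_bio_data (writethrough_endio() uses them to re-issue the bio
 * to the cache device), so the other modes get by with the smaller,
 * offsetof()-based size.
 */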
  555. static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
  556. {
  557. struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
  558. BUG_ON(!pb);
  559. return pb;
  560. }
  561. static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
  562. {
  563. struct per_bio_data *pb = get_per_bio_data(bio, data_size);
  564. pb->tick = false;
  565. pb->req_nr = dm_bio_get_target_bio_nr(bio);
  566. pb->all_io_entry = NULL;
  567. return pb;
  568. }
  569. /*----------------------------------------------------------------
  570. * Remapping
  571. *--------------------------------------------------------------*/
  572. static void remap_to_origin(struct cache *cache, struct bio *bio)
  573. {
  574. bio->bi_bdev = cache->origin_dev->bdev;
  575. }
  576. static void remap_to_cache(struct cache *cache, struct bio *bio,
  577. dm_cblock_t cblock)
  578. {
  579. sector_t bi_sector = bio->bi_iter.bi_sector;
  580. sector_t block = from_cblock(cblock);
  581. bio->bi_bdev = cache->cache_dev->bdev;
  582. if (!block_size_is_power_of_two(cache))
  583. bio->bi_iter.bi_sector =
  584. (block * cache->sectors_per_block) +
  585. sector_div(bi_sector, cache->sectors_per_block);
  586. else
  587. bio->bi_iter.bi_sector =
  588. (block << cache->sectors_per_block_shift) |
  589. (bi_sector & (cache->sectors_per_block - 1));
  590. }
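/*
 * remap_to_cache() keeps the bio's offset within its block and substitutes
 * the cache block number. Illustrative numbers: with 128-sector blocks, a
 * bio at sector offset 5 within its origin block, mapped to cblock 3, ends
 * up at sector 3 * 128 + 5 = 389 on the cache device.
 */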
  591. static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
  592. {
  593. unsigned long flags;
  594. size_t pb_data_size = get_per_bio_data_size(cache);
  595. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  596. spin_lock_irqsave(&cache->lock, flags);
  597. if (cache->need_tick_bio &&
  598. !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
  599. pb->tick = true;
  600. cache->need_tick_bio = false;
  601. }
  602. spin_unlock_irqrestore(&cache->lock, flags);
  603. }
  604. static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
  605. dm_oblock_t oblock)
  606. {
  607. check_if_tick_bio_needed(cache, bio);
  608. remap_to_origin(cache, bio);
  609. if (bio_data_dir(bio) == WRITE)
  610. clear_discard(cache, oblock_to_dblock(cache, oblock));
  611. }
  612. static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
  613. dm_oblock_t oblock, dm_cblock_t cblock)
  614. {
  615. check_if_tick_bio_needed(cache, bio);
  616. remap_to_cache(cache, bio, cblock);
  617. if (bio_data_dir(bio) == WRITE) {
  618. set_dirty(cache, oblock, cblock);
  619. clear_discard(cache, oblock_to_dblock(cache, oblock));
  620. }
  621. }
  622. static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
  623. {
  624. sector_t block_nr = bio->bi_iter.bi_sector;
  625. if (!block_size_is_power_of_two(cache))
  626. (void) sector_div(block_nr, cache->sectors_per_block);
  627. else
  628. block_nr >>= cache->sectors_per_block_shift;
  629. return to_oblock(block_nr);
  630. }
  631. static int bio_triggers_commit(struct cache *cache, struct bio *bio)
  632. {
  633. return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
  634. }
  635. /*
  636. * You must increment the deferred set whilst the prison cell is held. To
  637. * encourage this, we ask for 'cell' to be passed in.
  638. */
  639. static void inc_ds(struct cache *cache, struct bio *bio,
  640. struct dm_bio_prison_cell *cell)
  641. {
  642. size_t pb_data_size = get_per_bio_data_size(cache);
  643. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  644. BUG_ON(!cell);
  645. BUG_ON(pb->all_io_entry);
  646. pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
  647. }
  648. static void issue(struct cache *cache, struct bio *bio)
  649. {
  650. unsigned long flags;
  651. if (!bio_triggers_commit(cache, bio)) {
  652. generic_make_request(bio);
  653. return;
  654. }
  655. /*
  656. * Batch together any bios that trigger commits and then issue a
  657. * single commit for them in do_worker().
  658. */
  659. spin_lock_irqsave(&cache->lock, flags);
  660. cache->commit_requested = true;
  661. bio_list_add(&cache->deferred_flush_bios, bio);
  662. spin_unlock_irqrestore(&cache->lock, flags);
  663. }
  664. static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell)
  665. {
  666. inc_ds(cache, bio, cell);
  667. issue(cache, bio);
  668. }
  669. static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
  670. {
  671. unsigned long flags;
  672. spin_lock_irqsave(&cache->lock, flags);
  673. bio_list_add(&cache->deferred_writethrough_bios, bio);
  674. spin_unlock_irqrestore(&cache->lock, flags);
  675. wake_worker(cache);
  676. }
  677. static void writethrough_endio(struct bio *bio, int err)
  678. {
  679. struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
  680. dm_unhook_bio(&pb->hook_info, bio);
  681. if (err) {
  682. bio_endio(bio, err);
  683. return;
  684. }
  685. dm_bio_restore(&pb->bio_details, bio);
  686. remap_to_cache(pb->cache, bio, pb->cblock);
  687. /*
  688. * We can't issue this bio directly, since we're in interrupt
  689. * context. So it gets put on a bio list for processing by the
  690. * worker thread.
  691. */
  692. defer_writethrough_bio(pb->cache, bio);
  693. }
  694. /*
  695. * When running in writethrough mode we need to send writes to clean blocks
  696. * to both the cache and origin devices. In the future we'd like to clone the
  697. * bio and issue the copies in parallel, but for now we send them in
  698. * series as this is easier.
  699. */
  700. static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
  701. dm_oblock_t oblock, dm_cblock_t cblock)
  702. {
  703. struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
  704. pb->cache = cache;
  705. pb->cblock = cblock;
  706. dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
  707. dm_bio_record(&pb->bio_details, bio);
  708. remap_to_origin_clear_discard(pb->cache, bio, oblock);
  709. }
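/*
 * So the writethrough path is: record the bio, hook its endio and send it
 * to the origin first; when that write completes, writethrough_endio()
 * restores the bio, remaps it to the cache block and defers it to the
 * worker thread, which then submits it to the cache device.
 */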
  710. /*----------------------------------------------------------------
  711. * Migration processing
  712. *
  713. * Migration covers moving data from the origin device to the cache, or
  714. * vice versa.
  715. *--------------------------------------------------------------*/
  716. static void inc_io_migrations(struct cache *cache)
  717. {
  718. atomic_inc(&cache->nr_io_migrations);
  719. }
  720. static void dec_io_migrations(struct cache *cache)
  721. {
  722. atomic_dec(&cache->nr_io_migrations);
  723. }
  724. static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
  725. bool holder)
  726. {
  727. (holder ? dm_cell_release : dm_cell_release_no_holder)
  728. (cache->prison, cell, &cache->deferred_bios);
  729. free_prison_cell(cache, cell);
  730. }
  731. static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
  732. bool holder)
  733. {
  734. unsigned long flags;
  735. spin_lock_irqsave(&cache->lock, flags);
  736. __cell_defer(cache, cell, holder);
  737. spin_unlock_irqrestore(&cache->lock, flags);
  738. wake_worker(cache);
  739. }
  740. static void free_io_migration(struct dm_cache_migration *mg)
  741. {
  742. dec_io_migrations(mg->cache);
  743. free_migration(mg);
  744. }
  745. static void migration_failure(struct dm_cache_migration *mg)
  746. {
  747. struct cache *cache = mg->cache;
  748. if (mg->writeback) {
  749. DMWARN_LIMIT("writeback failed; couldn't copy block");
  750. set_dirty(cache, mg->old_oblock, mg->cblock);
  751. cell_defer(cache, mg->old_ocell, false);
  752. } else if (mg->demote) {
  753. DMWARN_LIMIT("demotion failed; couldn't copy block");
  754. policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
  755. cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
  756. if (mg->promote)
  757. cell_defer(cache, mg->new_ocell, true);
  758. } else {
  759. DMWARN_LIMIT("promotion failed; couldn't copy block");
  760. policy_remove_mapping(cache->policy, mg->new_oblock);
  761. cell_defer(cache, mg->new_ocell, true);
  762. }
  763. free_io_migration(mg);
  764. }
  765. static void migration_success_pre_commit(struct dm_cache_migration *mg)
  766. {
  767. unsigned long flags;
  768. struct cache *cache = mg->cache;
  769. if (mg->writeback) {
  770. clear_dirty(cache, mg->old_oblock, mg->cblock);
  771. cell_defer(cache, mg->old_ocell, false);
  772. free_io_migration(mg);
  773. return;
  774. } else if (mg->demote) {
  775. if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
  776. DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
  777. policy_force_mapping(cache->policy, mg->new_oblock,
  778. mg->old_oblock);
  779. if (mg->promote)
  780. cell_defer(cache, mg->new_ocell, true);
  781. free_io_migration(mg);
  782. return;
  783. }
  784. } else {
  785. if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
  786. DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
  787. policy_remove_mapping(cache->policy, mg->new_oblock);
  788. free_io_migration(mg);
  789. return;
  790. }
  791. }
  792. spin_lock_irqsave(&cache->lock, flags);
  793. list_add_tail(&mg->list, &cache->need_commit_migrations);
  794. cache->commit_requested = true;
  795. spin_unlock_irqrestore(&cache->lock, flags);
  796. }
  797. static void migration_success_post_commit(struct dm_cache_migration *mg)
  798. {
  799. unsigned long flags;
  800. struct cache *cache = mg->cache;
  801. if (mg->writeback) {
  802. DMWARN("writeback unexpectedly triggered commit");
  803. return;
  804. } else if (mg->demote) {
  805. cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
  806. if (mg->promote) {
  807. mg->demote = false;
  808. spin_lock_irqsave(&cache->lock, flags);
  809. list_add_tail(&mg->list, &cache->quiesced_migrations);
  810. spin_unlock_irqrestore(&cache->lock, flags);
  811. } else {
  812. if (mg->invalidate)
  813. policy_remove_mapping(cache->policy, mg->old_oblock);
  814. free_io_migration(mg);
  815. }
  816. } else {
  817. if (mg->requeue_holder) {
  818. clear_dirty(cache, mg->new_oblock, mg->cblock);
  819. cell_defer(cache, mg->new_ocell, true);
  820. } else {
  821. /*
  822. * The block was promoted via an overwrite, so it's dirty.
  823. */
  824. set_dirty(cache, mg->new_oblock, mg->cblock);
  825. bio_endio(mg->new_ocell->holder, 0);
  826. cell_defer(cache, mg->new_ocell, false);
  827. }
  828. free_io_migration(mg);
  829. }
  830. }
  831. static void copy_complete(int read_err, unsigned long write_err, void *context)
  832. {
  833. unsigned long flags;
  834. struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
  835. struct cache *cache = mg->cache;
  836. if (read_err || write_err)
  837. mg->err = true;
  838. spin_lock_irqsave(&cache->lock, flags);
  839. list_add_tail(&mg->list, &cache->completed_migrations);
  840. spin_unlock_irqrestore(&cache->lock, flags);
  841. wake_worker(cache);
  842. }
  843. static void issue_copy(struct dm_cache_migration *mg)
  844. {
  845. int r;
  846. struct dm_io_region o_region, c_region;
  847. struct cache *cache = mg->cache;
  848. sector_t cblock = from_cblock(mg->cblock);
  849. o_region.bdev = cache->origin_dev->bdev;
  850. o_region.count = cache->sectors_per_block;
  851. c_region.bdev = cache->cache_dev->bdev;
  852. c_region.sector = cblock * cache->sectors_per_block;
  853. c_region.count = cache->sectors_per_block;
  854. if (mg->writeback || mg->demote) {
  855. /* demote */
  856. o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
  857. r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
  858. } else {
  859. /* promote */
  860. o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
  861. r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
  862. }
  863. if (r < 0) {
  864. DMERR_LIMIT("issuing migration failed");
  865. migration_failure(mg);
  866. }
  867. }
  868. static void overwrite_endio(struct bio *bio, int err)
  869. {
  870. struct dm_cache_migration *mg = bio->bi_private;
  871. struct cache *cache = mg->cache;
  872. size_t pb_data_size = get_per_bio_data_size(cache);
  873. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  874. unsigned long flags;
  875. dm_unhook_bio(&pb->hook_info, bio);
  876. if (err)
  877. mg->err = true;
  878. mg->requeue_holder = false;
  879. spin_lock_irqsave(&cache->lock, flags);
  880. list_add_tail(&mg->list, &cache->completed_migrations);
  881. spin_unlock_irqrestore(&cache->lock, flags);
  882. wake_worker(cache);
  883. }
  884. static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
  885. {
  886. size_t pb_data_size = get_per_bio_data_size(mg->cache);
  887. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  888. dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
  889. remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
  890. /*
  891. * No need to inc_ds() here, since the cell will be held for the
  892. * duration of the io.
  893. */
  894. generic_make_request(bio);
  895. }
  896. static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
  897. {
  898. return (bio_data_dir(bio) == WRITE) &&
  899. (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
  900. }
  901. static void avoid_copy(struct dm_cache_migration *mg)
  902. {
  903. atomic_inc(&mg->cache->stats.copies_avoided);
  904. migration_success_pre_commit(mg);
  905. }
  906. static void calc_discard_block_range(struct cache *cache, struct bio *bio,
  907. dm_dblock_t *b, dm_dblock_t *e)
  908. {
  909. sector_t sb = bio->bi_iter.bi_sector;
  910. sector_t se = bio_end_sector(bio);
  911. *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));
  912. if (se - sb < cache->discard_block_size)
  913. *e = *b;
  914. else
  915. *e = to_dblock(block_div(se, cache->discard_block_size));
  916. }
  917. static void issue_discard(struct dm_cache_migration *mg)
  918. {
  919. dm_dblock_t b, e;
  920. struct bio *bio = mg->new_ocell->holder;
  921. calc_discard_block_range(mg->cache, bio, &b, &e);
  922. while (b != e) {
  923. set_discard(mg->cache, b);
  924. b = to_dblock(from_dblock(b) + 1);
  925. }
  926. bio_endio(bio, 0);
  927. cell_defer(mg->cache, mg->new_ocell, false);
  928. free_migration(mg);
  929. }
  930. static void issue_copy_or_discard(struct dm_cache_migration *mg)
  931. {
  932. bool avoid;
  933. struct cache *cache = mg->cache;
  934. if (mg->discard) {
  935. issue_discard(mg);
  936. return;
  937. }
  938. if (mg->writeback || mg->demote)
  939. avoid = !is_dirty(cache, mg->cblock) ||
  940. is_discarded_oblock(cache, mg->old_oblock);
  941. else {
  942. struct bio *bio = mg->new_ocell->holder;
  943. avoid = is_discarded_oblock(cache, mg->new_oblock);
  944. if (writeback_mode(&cache->features) &&
  945. !avoid && bio_writes_complete_block(cache, bio)) {
  946. issue_overwrite(mg, bio);
  947. return;
  948. }
  949. }
  950. avoid ? avoid_copy(mg) : issue_copy(mg);
  951. }
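/*
 * Summary of the decision above: discards just update the discard bitset
 * and complete; writebacks and demotions skip the copy when the cache block
 * is clean or the origin block is discarded; promotions skip it when the
 * origin block is discarded, and in writeback mode a full-block write is
 * turned into an overwrite of the cache block instead of a copy.
 */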
  952. static void complete_migration(struct dm_cache_migration *mg)
  953. {
  954. if (mg->err)
  955. migration_failure(mg);
  956. else
  957. migration_success_pre_commit(mg);
  958. }
  959. static void process_migrations(struct cache *cache, struct list_head *head,
  960. void (*fn)(struct dm_cache_migration *))
  961. {
  962. unsigned long flags;
  963. struct list_head list;
  964. struct dm_cache_migration *mg, *tmp;
  965. INIT_LIST_HEAD(&list);
  966. spin_lock_irqsave(&cache->lock, flags);
  967. list_splice_init(head, &list);
  968. spin_unlock_irqrestore(&cache->lock, flags);
  969. list_for_each_entry_safe(mg, tmp, &list, list)
  970. fn(mg);
  971. }
  972. static void __queue_quiesced_migration(struct dm_cache_migration *mg)
  973. {
  974. list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
  975. }
  976. static void queue_quiesced_migration(struct dm_cache_migration *mg)
  977. {
  978. unsigned long flags;
  979. struct cache *cache = mg->cache;
  980. spin_lock_irqsave(&cache->lock, flags);
  981. __queue_quiesced_migration(mg);
  982. spin_unlock_irqrestore(&cache->lock, flags);
  983. wake_worker(cache);
  984. }
  985. static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
  986. {
  987. unsigned long flags;
  988. struct dm_cache_migration *mg, *tmp;
  989. spin_lock_irqsave(&cache->lock, flags);
  990. list_for_each_entry_safe(mg, tmp, work, list)
  991. __queue_quiesced_migration(mg);
  992. spin_unlock_irqrestore(&cache->lock, flags);
  993. wake_worker(cache);
  994. }
  995. static void check_for_quiesced_migrations(struct cache *cache,
  996. struct per_bio_data *pb)
  997. {
  998. struct list_head work;
  999. if (!pb->all_io_entry)
  1000. return;
  1001. INIT_LIST_HEAD(&work);
  1002. dm_deferred_entry_dec(pb->all_io_entry, &work);
  1003. if (!list_empty(&work))
  1004. queue_quiesced_migrations(cache, &work);
  1005. }
  1006. static void quiesce_migration(struct dm_cache_migration *mg)
  1007. {
  1008. if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
  1009. queue_quiesced_migration(mg);
  1010. }
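/*
 * A migration is quiesced against the all_io deferred set: if no earlier
 * I/O is outstanding it is queued immediately, otherwise it is queued later
 * by check_for_quiesced_migrations() when the last bio holding it back
 * completes.
 */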
  1011. static void promote(struct cache *cache, struct prealloc *structs,
  1012. dm_oblock_t oblock, dm_cblock_t cblock,
  1013. struct dm_bio_prison_cell *cell)
  1014. {
  1015. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  1016. mg->err = false;
  1017. mg->discard = false;
  1018. mg->writeback = false;
  1019. mg->demote = false;
  1020. mg->promote = true;
  1021. mg->requeue_holder = true;
  1022. mg->invalidate = false;
  1023. mg->cache = cache;
  1024. mg->new_oblock = oblock;
  1025. mg->cblock = cblock;
  1026. mg->old_ocell = NULL;
  1027. mg->new_ocell = cell;
  1028. mg->start_jiffies = jiffies;
  1029. inc_io_migrations(cache);
  1030. quiesce_migration(mg);
  1031. }
  1032. static void writeback(struct cache *cache, struct prealloc *structs,
  1033. dm_oblock_t oblock, dm_cblock_t cblock,
  1034. struct dm_bio_prison_cell *cell)
  1035. {
  1036. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  1037. mg->err = false;
  1038. mg->discard = false;
  1039. mg->writeback = true;
  1040. mg->demote = false;
  1041. mg->promote = false;
  1042. mg->requeue_holder = true;
  1043. mg->invalidate = false;
  1044. mg->cache = cache;
  1045. mg->old_oblock = oblock;
  1046. mg->cblock = cblock;
  1047. mg->old_ocell = cell;
  1048. mg->new_ocell = NULL;
  1049. mg->start_jiffies = jiffies;
  1050. inc_io_migrations(cache);
  1051. quiesce_migration(mg);
  1052. }
  1053. static void demote_then_promote(struct cache *cache, struct prealloc *structs,
  1054. dm_oblock_t old_oblock, dm_oblock_t new_oblock,
  1055. dm_cblock_t cblock,
  1056. struct dm_bio_prison_cell *old_ocell,
  1057. struct dm_bio_prison_cell *new_ocell)
  1058. {
  1059. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  1060. mg->err = false;
  1061. mg->discard = false;
  1062. mg->writeback = false;
  1063. mg->demote = true;
  1064. mg->promote = true;
  1065. mg->requeue_holder = true;
  1066. mg->invalidate = false;
  1067. mg->cache = cache;
  1068. mg->old_oblock = old_oblock;
  1069. mg->new_oblock = new_oblock;
  1070. mg->cblock = cblock;
  1071. mg->old_ocell = old_ocell;
  1072. mg->new_ocell = new_ocell;
  1073. mg->start_jiffies = jiffies;
  1074. inc_io_migrations(cache);
  1075. quiesce_migration(mg);
  1076. }
  1077. /*
  1078. * Invalidate a cache entry. No writeback occurs; any changes in the cache
  1079. * block are thrown away.
  1080. */
  1081. static void invalidate(struct cache *cache, struct prealloc *structs,
  1082. dm_oblock_t oblock, dm_cblock_t cblock,
  1083. struct dm_bio_prison_cell *cell)
  1084. {
  1085. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  1086. mg->err = false;
  1087. mg->discard = false;
  1088. mg->writeback = false;
  1089. mg->demote = true;
  1090. mg->promote = false;
  1091. mg->requeue_holder = true;
  1092. mg->invalidate = true;
  1093. mg->cache = cache;
  1094. mg->old_oblock = oblock;
  1095. mg->cblock = cblock;
  1096. mg->old_ocell = cell;
  1097. mg->new_ocell = NULL;
  1098. mg->start_jiffies = jiffies;
  1099. inc_io_migrations(cache);
  1100. quiesce_migration(mg);
  1101. }
  1102. static void discard(struct cache *cache, struct prealloc *structs,
  1103. struct dm_bio_prison_cell *cell)
  1104. {
  1105. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  1106. mg->err = false;
  1107. mg->discard = true;
  1108. mg->writeback = false;
  1109. mg->demote = false;
  1110. mg->promote = false;
  1111. mg->requeue_holder = false;
  1112. mg->invalidate = false;
  1113. mg->cache = cache;
  1114. mg->old_ocell = NULL;
  1115. mg->new_ocell = cell;
  1116. mg->start_jiffies = jiffies;
  1117. quiesce_migration(mg);
  1118. }
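/*
 * The constructors above set the migration flags as follows:
 *   writeback()           - writeback only, old_ocell held
 *   promote()             - promote only, new_ocell held
 *   demote_then_promote() - demote + promote, both cells held
 *   invalidate()          - demote + invalidate, old_ocell held; the policy
 *                           mapping is removed once the demotion commits
 *   discard()             - discard only; not counted as background io
 */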
  1119. /*----------------------------------------------------------------
  1120. * bio processing
  1121. *--------------------------------------------------------------*/
  1122. static void defer_bio(struct cache *cache, struct bio *bio)
  1123. {
  1124. unsigned long flags;
  1125. spin_lock_irqsave(&cache->lock, flags);
  1126. bio_list_add(&cache->deferred_bios, bio);
  1127. spin_unlock_irqrestore(&cache->lock, flags);
  1128. wake_worker(cache);
  1129. }
  1130. static void process_flush_bio(struct cache *cache, struct bio *bio)
  1131. {
  1132. size_t pb_data_size = get_per_bio_data_size(cache);
  1133. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  1134. BUG_ON(bio->bi_iter.bi_size);
  1135. if (!pb->req_nr)
  1136. remap_to_origin(cache, bio);
  1137. else
  1138. remap_to_cache(cache, bio, 0);
  1139. /*
  1140. * REQ_FLUSH is not directed at any particular block so we don't
  1141. * need to inc_ds(). REQ_FUA's are split into a write + REQ_FLUSH
  1142. * by dm-core.
  1143. */
  1144. issue(cache, bio);
  1145. }
  1146. static void process_discard_bio(struct cache *cache, struct prealloc *structs,
  1147. struct bio *bio)
  1148. {
  1149. int r;
  1150. dm_dblock_t b, e;
  1151. struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
  1152. calc_discard_block_range(cache, bio, &b, &e);
  1153. if (b == e) {
  1154. bio_endio(bio, 0);
  1155. return;
  1156. }
  1157. cell_prealloc = prealloc_get_cell(structs);
  1158. r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prealloc,
  1159. (cell_free_fn) prealloc_put_cell,
  1160. structs, &new_ocell);
  1161. if (r > 0)
  1162. return;
  1163. discard(cache, structs, new_ocell);
  1164. }
  1165. static bool spare_migration_bandwidth(struct cache *cache)
  1166. {
  1167. sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
  1168. cache->sectors_per_block;
  1169. return current_volume < cache->migration_threshold;
  1170. }
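/*
 * migration_threshold is in sectors: a new background migration is only
 * started while the estimated volume already being copied, i.e.
 * (nr_io_migrations + 1) * sectors_per_block, stays below it.
 */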
  1171. static void inc_hit_counter(struct cache *cache, struct bio *bio)
  1172. {
  1173. atomic_inc(bio_data_dir(bio) == READ ?
  1174. &cache->stats.read_hit : &cache->stats.write_hit);
  1175. }
  1176. static void inc_miss_counter(struct cache *cache, struct bio *bio)
  1177. {
  1178. atomic_inc(bio_data_dir(bio) == READ ?
  1179. &cache->stats.read_miss : &cache->stats.write_miss);
  1180. }
static void process_bio(struct cache *cache, struct prealloc *structs,
			struct bio *bio)
{
	int r;
	bool release_cell = true;
	dm_oblock_t block = get_bio_block(cache, bio);
	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
	struct policy_result lookup_result;
	bool passthrough = passthrough_mode(&cache->features);
	bool discarded_block, can_migrate;

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell_prealloc = prealloc_get_cell(structs);
	r = bio_detain(cache, block, bio, cell_prealloc,
		       (cell_free_fn) prealloc_put_cell,
		       structs, &new_ocell);
	if (r > 0)
		return;

	discarded_block = is_discarded_oblock(cache, block);
	can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));

	r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
		       bio, &lookup_result);

	if (r == -EWOULDBLOCK)
		/* migration has been denied */
		lookup_result.op = POLICY_MISS;

	switch (lookup_result.op) {
	case POLICY_HIT:
		if (passthrough) {
			inc_miss_counter(cache, bio);

			/*
			 * Passthrough always maps to the origin,
			 * invalidating any cache blocks that are written
			 * to.
			 */
			if (bio_data_dir(bio) == WRITE) {
				atomic_inc(&cache->stats.demotion);
				invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
				release_cell = false;

			} else {
				/* FIXME: factor out issue_origin() */
				remap_to_origin_clear_discard(cache, bio, block);
				inc_and_issue(cache, bio, new_ocell);
			}
		} else {
			inc_hit_counter(cache, bio);

			if (bio_data_dir(bio) == WRITE &&
			    writethrough_mode(&cache->features) &&
			    !is_dirty(cache, lookup_result.cblock)) {
				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
				inc_and_issue(cache, bio, new_ocell);

			} else {
				remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
				inc_and_issue(cache, bio, new_ocell);
			}
		}
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		remap_to_origin_clear_discard(cache, bio, block);
		inc_and_issue(cache, bio, new_ocell);
		break;

	case POLICY_NEW:
		atomic_inc(&cache->stats.promotion);
		promote(cache, structs, block, lookup_result.cblock, new_ocell);
		release_cell = false;
		break;

	case POLICY_REPLACE:
		cell_prealloc = prealloc_get_cell(structs);
		r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
			       (cell_free_fn) prealloc_put_cell,
			       structs, &old_ocell);
		if (r > 0) {
			/*
			 * We have to be careful to avoid lock inversion of
			 * the cells.  So we back off, and wait for the
			 * old_ocell to become free.
			 */
			policy_force_mapping(cache->policy, block,
					     lookup_result.old_oblock);
			atomic_inc(&cache->stats.cache_cell_clash);
			break;
		}
		atomic_inc(&cache->stats.demotion);
		atomic_inc(&cache->stats.promotion);

		demote_then_promote(cache, structs, lookup_result.old_oblock,
				    block, lookup_result.cblock,
				    old_ocell, new_ocell);
		release_cell = false;
		break;

	default:
		DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
			    (unsigned) lookup_result.op);
		bio_io_error(bio);
	}

	if (release_cell)
		cell_defer(cache, new_ocell, false);
}
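
/*
 * True when COMMIT_PERIOD jiffies have elapsed since the last commit.  The
 * first clause catches jiffies wrapping around past last_commit_jiffies.
 */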
static int need_commit_due_to_time(struct cache *cache)
{
	return jiffies < cache->last_commit_jiffies ||
	       jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
}

static int commit_if_needed(struct cache *cache)
{
	int r = 0;

	if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
	    dm_cache_changed_this_transaction(cache->cmd)) {
		atomic_inc(&cache->stats.commit_count);
		cache->commit_requested = false;
		r = dm_cache_commit(cache->cmd, false);
		cache->last_commit_jiffies = jiffies;
	}

	return r;
}
static void process_deferred_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;
	struct prealloc structs;

	memset(&structs, 0, sizeof(structs));
	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while (!bio_list_empty(&bios)) {
		/*
		 * If we've got no free migration structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (prealloc_data_structs(cache, &structs)) {
			spin_lock_irqsave(&cache->lock, flags);
			bio_list_merge(&cache->deferred_bios, &bios);
			spin_unlock_irqrestore(&cache->lock, flags);
			break;
		}

		bio = bio_list_pop(&bios);

		if (bio->bi_rw & REQ_FLUSH)
			process_flush_bio(cache, bio);
		else if (bio->bi_rw & REQ_DISCARD)
			process_discard_bio(cache, &structs, bio);
		else
			process_bio(cache, &structs, bio);
	}

	prealloc_free_structs(cache, &structs);
}
static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_flush_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	/*
	 * These bios have already been through inc_ds()
	 */
	while ((bio = bio_list_pop(&bios)))
		submit_bios ? generic_make_request(bio) : bio_io_error(bio);
}

static void process_deferred_writethrough_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_writethrough_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	/*
	 * These bios have already been through inc_ds()
	 */
	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}
static void writeback_some_dirty_blocks(struct cache *cache)
{
	int r = 0;
	dm_oblock_t oblock;
	dm_cblock_t cblock;
	struct prealloc structs;
	struct dm_bio_prison_cell *old_ocell;

	memset(&structs, 0, sizeof(structs));

	while (spare_migration_bandwidth(cache)) {
		if (prealloc_data_structs(cache, &structs))
			break;

		r = policy_writeback_work(cache->policy, &oblock, &cblock);
		if (r)
			break;

		r = get_cell(cache, oblock, &structs, &old_ocell);
		if (r) {
			policy_set_dirty(cache->policy, oblock);
			break;
		}

		writeback(cache, &structs, oblock, cblock, old_ocell);
	}

	prealloc_free_structs(cache, &structs);
}
/*----------------------------------------------------------------
 * Invalidations.
 * Dropping something from the cache *without* writing back.
 *--------------------------------------------------------------*/
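
/*
 * Requests arrive via the "invalidate_cblocks" message (see
 * cache_message() below), which is only permitted while the cache is in
 * passthrough mode, the one mode in which no block can be dirty.
 */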
static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
{
	int r = 0;
	uint64_t begin = from_cblock(req->cblocks->begin);
	uint64_t end = from_cblock(req->cblocks->end);

	while (begin != end) {
		r = policy_remove_cblock(cache->policy, to_cblock(begin));
		if (!r) {
			r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
			if (r)
				break;

		} else if (r == -ENODATA) {
			/* harmless, already unmapped */
			r = 0;

		} else {
			DMERR("policy_remove_cblock failed");
			break;
		}

		begin++;
	}

	cache->commit_requested = true;

	req->err = r;
	atomic_set(&req->complete, 1);

	wake_up(&req->result_wait);
}

static void process_invalidation_requests(struct cache *cache)
{
	struct list_head list;
	struct invalidation_request *req, *tmp;

	INIT_LIST_HEAD(&list);
	spin_lock(&cache->invalidation_lock);
	list_splice_init(&cache->invalidation_requests, &list);
	spin_unlock(&cache->invalidation_lock);

	list_for_each_entry_safe (req, tmp, &list, list)
		process_invalidation_request(cache, req);
}
/*----------------------------------------------------------------
 * Main worker loop
 *--------------------------------------------------------------*/
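
/*
 * Quiescing handshake: cache_postsuspend() raises the quiescing flag and
 * waits; the worker notices it, stops taking on new bios, and acks via
 * quiescing_ack so the suspend can proceed once in-flight work drains.
 */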
static bool is_quiescing(struct cache *cache)
{
	return atomic_read(&cache->quiescing);
}

static void ack_quiescing(struct cache *cache)
{
	if (is_quiescing(cache)) {
		atomic_inc(&cache->quiescing_ack);
		wake_up(&cache->quiescing_wait);
	}
}

static void wait_for_quiescing_ack(struct cache *cache)
{
	wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
}

static void start_quiescing(struct cache *cache)
{
	atomic_inc(&cache->quiescing);
	wait_for_quiescing_ack(cache);
}

static void stop_quiescing(struct cache *cache)
{
	atomic_set(&cache->quiescing, 0);
	atomic_set(&cache->quiescing_ack, 0);
}

static void wait_for_migrations(struct cache *cache)
{
	wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
}

static void stop_worker(struct cache *cache)
{
	cancel_delayed_work(&cache->waker);
	flush_workqueue(cache->wq);
}
static void requeue_deferred_io(struct cache *cache)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static int more_work(struct cache *cache)
{
	if (is_quiescing(cache))
		return !list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations);
	else
		return !bio_list_empty(&cache->deferred_bios) ||
			!bio_list_empty(&cache->deferred_flush_bios) ||
			!bio_list_empty(&cache->deferred_writethrough_bios) ||
			!list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations) ||
			cache->invalidate;
}
static void do_worker(struct work_struct *ws)
{
	struct cache *cache = container_of(ws, struct cache, worker);

	do {
		if (!is_quiescing(cache)) {
			writeback_some_dirty_blocks(cache);
			process_deferred_writethrough_bios(cache);
			process_deferred_bios(cache);
			process_invalidation_requests(cache);
		}

		process_migrations(cache, &cache->quiesced_migrations, issue_copy_or_discard);
		process_migrations(cache, &cache->completed_migrations, complete_migration);

		if (commit_if_needed(cache)) {
			process_deferred_flush_bios(cache, false);
			process_migrations(cache, &cache->need_commit_migrations, migration_failure);

			/*
			 * FIXME: rollback metadata or just go into a
			 * failure mode and error everything
			 */
		} else {
			process_deferred_flush_bios(cache, true);
			process_migrations(cache, &cache->need_commit_migrations,
					   migration_success_post_commit);
		}

		ack_quiescing(cache);

	} while (more_work(cache));
}

/*
 * We want to commit periodically so that not too much
 * unwritten metadata builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
	policy_tick(cache->policy);
	wake_worker(cache);
	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}
/*----------------------------------------------------------------*/

static int is_congested(struct dm_dev *dev, int bdi_bits)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);
	return bdi_congested(&q->backing_dev_info, bdi_bits);
}

static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	struct cache *cache = container_of(cb, struct cache, callbacks);

	return is_congested(cache->origin_dev, bdi_bits) ||
		is_congested(cache->cache_dev, bdi_bits);
}
/*----------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------*/

/*
 * This function gets called on the error paths of the constructor, so we
 * have to cope with a partially initialised struct.
 */
static void destroy(struct cache *cache)
{
	unsigned i;

	if (cache->migration_pool)
		mempool_destroy(cache->migration_pool);

	if (cache->all_io_ds)
		dm_deferred_set_destroy(cache->all_io_ds);

	if (cache->prison)
		dm_bio_prison_destroy(cache->prison);

	if (cache->wq)
		destroy_workqueue(cache->wq);

	if (cache->dirty_bitset)
		free_bitset(cache->dirty_bitset);

	if (cache->discard_bitset)
		free_bitset(cache->discard_bitset);

	if (cache->copier)
		dm_kcopyd_client_destroy(cache->copier);

	if (cache->cmd)
		dm_cache_metadata_close(cache->cmd);

	if (cache->metadata_dev)
		dm_put_device(cache->ti, cache->metadata_dev);

	if (cache->origin_dev)
		dm_put_device(cache->ti, cache->origin_dev);

	if (cache->cache_dev)
		dm_put_device(cache->ti, cache->cache_dev);

	if (cache->policy)
		dm_cache_policy_destroy(cache->policy);

	for (i = 0; i < cache->nr_ctr_args; i++)
		kfree(cache->ctr_args[i]);
	kfree(cache->ctr_args);

	kfree(cache);
}

static void cache_dtr(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	destroy(cache);
}

static sector_t get_dev_size(struct dm_dev *dev)
{
	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}
/*----------------------------------------------------------------*/

/*
 * Construct a cache device mapping.
 *
 * cache <metadata dev> <cache dev> <origin dev> <block size>
 *       <#feature args> [<feature arg>]*
 *       <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev   : fast device holding the persistent metadata
 * cache dev      : fast device holding cached data blocks
 * origin dev     : slow device holding original data blocks
 * block size     : cache unit size in sectors
 *
 * #feature args  : number of feature arguments passed
 * feature args   : writethrough.  (The default is writeback.)
 *
 * policy         : the replacement policy to use
 * #policy args   : an even number of policy arguments corresponding
 *                  to key/value pairs passed to the policy
 * policy args    : key/value pairs passed to the policy
 *                  E.g. 'sequential_threshold 1024'
 *                  See cache-policies.txt for details.
 *
 * Optional feature arguments are:
 *   writethrough : write through caching that prohibits cache block
 *                  content from being different from origin block content.
 *                  Without this argument, the default behaviour is to write
 *                  back cache block contents later for performance reasons,
 *                  so they may differ from the corresponding origin blocks.
 */
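
/*
 * For example (device names and sizes are illustrative), a 256KiB-block
 * writeback cache over a 20GiB origin could be created with:
 *
 *   dmsetup create cached --table \
 *     "0 41943040 cache /dev/mapper/fast-meta /dev/mapper/fast \
 *      /dev/mapper/slow 512 1 writeback default 0"
 */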
struct cache_args {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;

	struct dm_dev *cache_dev;
	sector_t cache_sectors;

	struct dm_dev *origin_dev;
	sector_t origin_sectors;

	uint32_t block_size;

	const char *policy_name;
	int policy_argc;
	const char **policy_argv;

	struct cache_features features;
};

static void destroy_cache_args(struct cache_args *ca)
{
	if (ca->metadata_dev)
		dm_put_device(ca->ti, ca->metadata_dev);

	if (ca->cache_dev)
		dm_put_device(ca->ti, ca->cache_dev);

	if (ca->origin_dev)
		dm_put_device(ca->ti, ca->origin_dev);

	kfree(ca);
}

static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{
	if (!as->argc) {
		*error = "Insufficient args";
		return false;
	}

	return true;
}
static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
			      char **error)
{
	int r;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(ca->metadata_dev);
	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(ca->metadata_dev->bdev, b),
		       DM_CACHE_METADATA_MAX_SECTORS_WARNING);

	return 0;
}
static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
			   char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->cache_dev);
	if (r) {
		*error = "Error opening cache device";
		return r;
	}
	ca->cache_sectors = get_dev_size(ca->cache_dev);

	return 0;
}

static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->origin_dev);
	if (r) {
		*error = "Error opening origin device";
		return r;
	}

	ca->origin_sectors = get_dev_size(ca->origin_dev);
	if (ca->ti->len > ca->origin_sectors) {
		*error = "Device size larger than cached device";
		return -EINVAL;
	}

	return 0;
}
static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	unsigned long block_size;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
		*error = "Invalid data block size";
		return -EINVAL;
	}

	if (block_size > ca->cache_sectors) {
		*error = "Data block size is larger than the cache device";
		return -EINVAL;
	}

	ca->block_size = block_size;

	return 0;
}

static void init_features(struct cache_features *cf)
{
	cf->mode = CM_WRITE;
	cf->io_mode = CM_IO_WRITEBACK;
}
static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
			  char **error)
{
	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of cache feature arguments"},
	};

	int r;
	unsigned argc;
	const char *arg;
	struct cache_features *cf = &ca->features;

	init_features(cf);

	r = dm_read_arg_group(_args, as, &argc, error);
	if (r)
		return -EINVAL;

	while (argc--) {
		arg = dm_shift_arg(as);

		if (!strcasecmp(arg, "writeback"))
			cf->io_mode = CM_IO_WRITEBACK;

		else if (!strcasecmp(arg, "writethrough"))
			cf->io_mode = CM_IO_WRITETHROUGH;

		else if (!strcasecmp(arg, "passthrough"))
			cf->io_mode = CM_IO_PASSTHROUGH;

		else {
			*error = "Unrecognised cache feature requested";
			return -EINVAL;
		}
	}

	return 0;
}
static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
			char **error)
{
	static struct dm_arg _args[] = {
		{0, 1024, "Invalid number of policy arguments"},
	};

	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	ca->policy_name = dm_shift_arg(as);

	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
	if (r)
		return -EINVAL;

	ca->policy_argv = (const char **)as->argv;
	dm_consume_args(as, ca->policy_argc);

	return 0;
}

static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
			    char **error)
{
	int r;
	struct dm_arg_set as;

	as.argc = argc;
	as.argv = argv;

	r = parse_metadata_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_cache_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_origin_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_block_size(ca, &as, error);
	if (r)
		return r;

	r = parse_features(ca, &as, error);
	if (r)
		return r;

	r = parse_policy(ca, &as, error);
	if (r)
		return r;

	return 0;
}
/*----------------------------------------------------------------*/

static struct kmem_cache *migration_cache;

#define NOT_CORE_OPTION 1

static int process_config_option(struct cache *cache, const char *key, const char *value)
{
	unsigned long tmp;

	if (!strcasecmp(key, "migration_threshold")) {
		if (kstrtoul(value, 10, &tmp))
			return -EINVAL;

		cache->migration_threshold = tmp;
		return 0;
	}

	return NOT_CORE_OPTION;
}

static int set_config_value(struct cache *cache, const char *key, const char *value)
{
	int r = process_config_option(cache, key, value);

	if (r == NOT_CORE_OPTION)
		r = policy_set_config_value(cache->policy, key, value);

	if (r)
		DMWARN("bad config value for %s: %s", key, value);

	return r;
}
static int set_config_values(struct cache *cache, int argc, const char **argv)
{
	int r = 0;

	if (argc & 1) {
		DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
		return -EINVAL;
	}

	while (argc) {
		r = set_config_value(cache, argv[0], argv[1]);
		if (r)
			break;

		argc -= 2;
		argv += 2;
	}

	return r;
}

static int create_cache_policy(struct cache *cache, struct cache_args *ca,
			       char **error)
{
	struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
							   cache->cache_size,
							   cache->origin_sectors,
							   cache->sectors_per_block);
	if (IS_ERR(p)) {
		*error = "Error creating cache's policy";
		return PTR_ERR(p);
	}
	cache->policy = p;

	return 0;
}
/*
 * We want the discard block size to be at least the size of the cache
 * block size and have no more than 2^14 discard blocks across the origin.
 */
#define MAX_DISCARD_BLOCKS (1 << 14)

static bool too_many_discard_blocks(sector_t discard_block_size,
				    sector_t origin_size)
{
	(void) sector_div(origin_size, discard_block_size);

	return origin_size > MAX_DISCARD_BLOCKS;
}

static sector_t calculate_discard_block_size(sector_t cache_block_size,
					     sector_t origin_size)
{
	sector_t discard_block_size = cache_block_size;

	if (origin_size)
		while (too_many_discard_blocks(discard_block_size, origin_size))
			discard_block_size *= 2;

	return discard_block_size;
}
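
/*
 * For example, a 2TiB origin (4294967296 sectors) with 512-sector cache
 * blocks starts at 8Mi discard blocks; doubling until the count falls to
 * 2^14 or fewer yields a discard block size of 262144 sectors (128MiB).
 */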
static void set_cache_size(struct cache *cache, dm_cblock_t size)
{
	dm_block_t nr_blocks = from_cblock(size);

	if (nr_blocks > (1 << 20) && cache->cache_size != size)
		DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
			     "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
			     "Please consider increasing the cache block size to reduce the overall cache block count.",
			     (unsigned long long) nr_blocks);

	cache->cache_size = size;
}
#define DEFAULT_MIGRATION_THRESHOLD 2048

static int cache_create(struct cache_args *ca, struct cache **result)
{
	int r = 0;
	char **error = &ca->ti->error;
	struct cache *cache;
	struct dm_target *ti = ca->ti;
	dm_block_t origin_blocks;
	struct dm_cache_metadata *cmd;
	bool may_format = ca->features.mode == CM_WRITE;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return -ENOMEM;

	cache->ti = ca->ti;
	ti->private = cache;
	ti->num_flush_bios = 2;
	ti->flush_supported = true;

	ti->num_discard_bios = 1;
	ti->discards_supported = true;
	ti->discard_zeroes_data_unsupported = true;
	ti->split_discard_bios = false;

	cache->features = ca->features;
	ti->per_bio_data_size = get_per_bio_data_size(cache);

	cache->callbacks.congested_fn = cache_is_congested;
	dm_table_add_target_callbacks(ti->table, &cache->callbacks);

	cache->metadata_dev = ca->metadata_dev;
	cache->origin_dev = ca->origin_dev;
	cache->cache_dev = ca->cache_dev;

	ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;

	/* FIXME: factor out this whole section */
	origin_blocks = cache->origin_sectors = ca->origin_sectors;
	origin_blocks = block_div(origin_blocks, ca->block_size);
	cache->origin_blocks = to_oblock(origin_blocks);

	cache->sectors_per_block = ca->block_size;
	if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
		r = -EINVAL;
		goto bad;
	}

	if (ca->block_size & (ca->block_size - 1)) {
		dm_block_t cache_size = ca->cache_sectors;

		cache->sectors_per_block_shift = -1;
		cache_size = block_div(cache_size, ca->block_size);
		set_cache_size(cache, to_cblock(cache_size));
	} else {
		cache->sectors_per_block_shift = __ffs(ca->block_size);
		set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
	}

	r = create_cache_policy(cache, ca, error);
	if (r)
		goto bad;

	cache->policy_nr_args = ca->policy_argc;
	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;

	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
	if (r) {
		*error = "Error setting cache policy's config values";
		goto bad;
	}

	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
				     ca->block_size, may_format,
				     dm_cache_policy_get_hint_size(cache->policy));
	if (IS_ERR(cmd)) {
		*error = "Error creating metadata object";
		r = PTR_ERR(cmd);
		goto bad;
	}
	cache->cmd = cmd;

	if (passthrough_mode(&cache->features)) {
		bool all_clean;

		r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
		if (r) {
			*error = "dm_cache_metadata_all_clean() failed";
			goto bad;
		}

		if (!all_clean) {
			*error = "Cannot enter passthrough mode unless all blocks are clean";
			r = -EINVAL;
			goto bad;
		}
	}

	spin_lock_init(&cache->lock);
	bio_list_init(&cache->deferred_bios);
	bio_list_init(&cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	INIT_LIST_HEAD(&cache->quiesced_migrations);
	INIT_LIST_HEAD(&cache->completed_migrations);
	INIT_LIST_HEAD(&cache->need_commit_migrations);
	atomic_set(&cache->nr_allocated_migrations, 0);
	atomic_set(&cache->nr_io_migrations, 0);
	init_waitqueue_head(&cache->migration_wait);

	init_waitqueue_head(&cache->quiescing_wait);
	atomic_set(&cache->quiescing, 0);
	atomic_set(&cache->quiescing_ack, 0);

	r = -ENOMEM;
	atomic_set(&cache->nr_dirty, 0);
	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
	if (!cache->dirty_bitset) {
		*error = "could not allocate dirty bitset";
		goto bad;
	}
	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));

	cache->discard_block_size =
		calculate_discard_block_size(cache->sectors_per_block,
					     cache->origin_sectors);
	cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
							      cache->discard_block_size));
	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
	if (!cache->discard_bitset) {
		*error = "could not allocate discard bitset";
		goto bad;
	}
	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(cache->copier)) {
		*error = "could not create kcopyd client";
		r = PTR_ERR(cache->copier);
		goto bad;
	}

	cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!cache->wq) {
		*error = "could not create workqueue for metadata object";
		goto bad;
	}
	INIT_WORK(&cache->worker, do_worker);
	INIT_DELAYED_WORK(&cache->waker, do_waker);
	cache->last_commit_jiffies = jiffies;

	cache->prison = dm_bio_prison_create();
	if (!cache->prison) {
		*error = "could not create bio prison";
		goto bad;
	}

	cache->all_io_ds = dm_deferred_set_create();
	if (!cache->all_io_ds) {
		*error = "could not create all_io deferred set";
		goto bad;
	}

	cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
							 migration_cache);
	if (!cache->migration_pool) {
		*error = "Error creating cache's migration mempool";
		goto bad;
	}

	cache->need_tick_bio = true;
	cache->sized = false;
	cache->invalidate = false;
	cache->commit_requested = false;
	cache->loaded_mappings = false;
	cache->loaded_discards = false;

	load_stats(cache);

	atomic_set(&cache->stats.demotion, 0);
	atomic_set(&cache->stats.promotion, 0);
	atomic_set(&cache->stats.copies_avoided, 0);
	atomic_set(&cache->stats.cache_cell_clash, 0);
	atomic_set(&cache->stats.commit_count, 0);
	atomic_set(&cache->stats.discard_count, 0);

	spin_lock_init(&cache->invalidation_lock);
	INIT_LIST_HEAD(&cache->invalidation_requests);

	*result = cache;
	return 0;

bad:
	destroy(cache);
	return r;
}
static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
	unsigned i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);
		if (!copy[i]) {
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			return -ENOMEM;
		}
	}

	cache->nr_ctr_args = argc;
	cache->ctr_args = copy;

	return 0;
}

static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct cache_args *ca;
	struct cache *cache = NULL;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca) {
		ti->error = "Error allocating memory for cache";
		return -ENOMEM;
	}
	ca->ti = ti;

	r = parse_cache_args(ca, argc, argv, &ti->error);
	if (r)
		goto out;

	r = cache_create(ca, &cache);
	if (r)
		goto out;

	r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
	if (r) {
		destroy(cache);
		goto out;
	}

	ti->private = cache;

out:
	destroy_cache_args(ca);
	return r;
}
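
/*
 * The fast path: called for every bio from dm-core.  Anything that needs a
 * migration, a metadata update or preallocated resources is deferred to
 * the worker thread; simple hits and misses are remapped here directly.
 */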
static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell)
{
	int r;
	dm_oblock_t block = get_bio_block(cache, bio);
	size_t pb_data_size = get_per_bio_data_size(cache);
	bool can_migrate = false;
	bool discarded_block;
	struct policy_result lookup_result;
	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);

	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
		/*
		 * This can only occur if the io goes to a partial block at
		 * the end of the origin device.  We don't cache these.
		 * Just remap to the origin and carry on.
		 */
		remap_to_origin(cache, bio);
		return DM_MAPIO_REMAPPED;
	}

	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * Check to see if that block is currently migrating.
	 */
	*cell = alloc_prison_cell(cache);
	if (!*cell) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = bio_detain(cache, block, bio, *cell,
		       (cell_free_fn) free_prison_cell,
		       cache, cell);
	if (r) {
		if (r < 0)
			defer_bio(cache, bio);

		return DM_MAPIO_SUBMITTED;
	}

	discarded_block = is_discarded_oblock(cache, block);

	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
		       bio, &lookup_result);
	if (r == -EWOULDBLOCK) {
		cell_defer(cache, *cell, true);
		return DM_MAPIO_SUBMITTED;

	} else if (r) {
		DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
		cell_defer(cache, *cell, false);
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = DM_MAPIO_REMAPPED;
	switch (lookup_result.op) {
	case POLICY_HIT:
		if (passthrough_mode(&cache->features)) {
			if (bio_data_dir(bio) == WRITE) {
				/*
				 * We need to invalidate this block, so
				 * defer for the worker thread.
				 */
				cell_defer(cache, *cell, true);
				r = DM_MAPIO_SUBMITTED;

			} else {
				inc_miss_counter(cache, bio);
				remap_to_origin_clear_discard(cache, bio, block);
			}

		} else {
			inc_hit_counter(cache, bio);
			if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
			    !is_dirty(cache, lookup_result.cblock))
				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
			else
				remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
		}
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		if (pb->req_nr != 0) {
			/*
			 * This is a duplicate writethrough io that is no
			 * longer needed because the block has been demoted.
			 */
			bio_endio(bio, 0);
			cell_defer(cache, *cell, false);
			r = DM_MAPIO_SUBMITTED;

		} else
			remap_to_origin_clear_discard(cache, bio, block);
		break;

	default:
		DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
			    (unsigned) lookup_result.op);
		cell_defer(cache, *cell, false);
		bio_io_error(bio);
		r = DM_MAPIO_SUBMITTED;
	}

	return r;
}
static int cache_map(struct dm_target *ti, struct bio *bio)
{
	int r;
	struct dm_bio_prison_cell *cell = NULL;
	struct cache *cache = ti->private;

	r = __cache_map(cache, bio, &cell);
	if (r == DM_MAPIO_REMAPPED && cell) {
		inc_ds(cache, bio, cell);
		cell_defer(cache, cell, false);
	}

	return r;
}

static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct cache *cache = ti->private;
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	if (pb->tick) {
		policy_tick(cache->policy);

		spin_lock_irqsave(&cache->lock, flags);
		cache->need_tick_bio = true;
		spin_unlock_irqrestore(&cache->lock, flags);
	}

	check_for_quiesced_migrations(cache, pb);

	return 0;
}
static int write_dirty_bitset(struct cache *cache)
{
	unsigned i, r;

	for (i = 0; i < from_cblock(cache->cache_size); i++) {
		r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
				       is_dirty(cache, to_cblock(i)));
		if (r)
			return r;
	}

	return 0;
}

static int write_discard_bitset(struct cache *cache)
{
	unsigned i, r;

	r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
					   cache->discard_nr_blocks);
	if (r) {
		DMERR("could not resize on-disk discard bitset");
		return r;
	}

	for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
		r = dm_cache_set_discard(cache->cmd, to_dblock(i),
					 is_discarded(cache, to_dblock(i)));
		if (r)
			return r;
	}

	return 0;
}
/*
 * returns true on success
 */
static bool sync_metadata(struct cache *cache)
{
	int r1, r2, r3, r4;

	r1 = write_dirty_bitset(cache);
	if (r1)
		DMERR("could not write dirty bitset");

	r2 = write_discard_bitset(cache);
	if (r2)
		DMERR("could not write discard bitset");

	save_stats(cache);

	r3 = dm_cache_write_hints(cache->cmd, cache->policy);
	if (r3)
		DMERR("could not write hints");

	/*
	 * If writing the above metadata failed, we still commit, but don't
	 * set the clean shutdown flag.  This will effectively force every
	 * dirty bit to be set on reload.
	 */
	r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
	if (r4)
		DMERR("could not write cache metadata.  Data loss may occur.");

	return !r1 && !r2 && !r3 && !r4;
}

static void cache_postsuspend(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	start_quiescing(cache);
	wait_for_migrations(cache);
	stop_worker(cache);
	requeue_deferred_io(cache);
	stop_quiescing(cache);

	(void) sync_metadata(cache);
}
static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
			bool dirty, uint32_t hint, bool hint_valid)
{
	int r;
	struct cache *cache = context;

	r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
	if (r)
		return r;

	if (dirty)
		set_dirty(cache, oblock, cblock);
	else
		clear_dirty(cache, oblock, cblock);

	return 0;
}
/*
 * The discard block size in the on disk metadata is not
 * necessarily the same as we're currently using.  So we have to
 * be careful to only set the discarded attribute if we know it
 * covers a complete block of the new size.
 */
struct discard_load_info {
	struct cache *cache;

	/*
	 * These blocks are sized using the on disk dblock size, rather
	 * than the current one.
	 */
	dm_block_t block_size;
	dm_block_t discard_begin, discard_end;
};

static void discard_load_info_init(struct cache *cache,
				   struct discard_load_info *li)
{
	li->cache = cache;
	li->discard_begin = li->discard_end = 0;
}

static void set_discard_range(struct discard_load_info *li)
{
	sector_t b, e;

	if (li->discard_begin == li->discard_end)
		return;

	/*
	 * Convert to sectors.
	 */
	b = li->discard_begin * li->block_size;
	e = li->discard_end * li->block_size;

	/*
	 * Then convert back to the current dblock size.
	 */
	b = dm_sector_div_up(b, li->cache->discard_block_size);
	sector_div(e, li->cache->discard_block_size);

	/*
	 * The origin may have shrunk, so we need to check we're still in
	 * bounds.
	 */
	if (e > from_dblock(li->cache->discard_nr_blocks))
		e = from_dblock(li->cache->discard_nr_blocks);

	for (; b < e; b++)
		set_discard(li->cache, to_dblock(b));
}
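
/*
 * For example, if the on-disk dblock size was 1024 sectors and the current
 * one is 2048, an on-disk range [4, 8) covers sectors [4096, 8192), which
 * rounds inwards to current dblocks [2, 4).
 */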
static int load_discard(void *context, sector_t discard_block_size,
			dm_dblock_t dblock, bool discard)
{
	struct discard_load_info *li = context;

	li->block_size = discard_block_size;

	if (discard) {
		if (from_dblock(dblock) == li->discard_end)
			/*
			 * We're already in a discard range, just extend it.
			 */
			li->discard_end = li->discard_end + 1ULL;

		else {
			/*
			 * Emit the old range and start a new one.
			 */
			set_discard_range(li);
			li->discard_begin = from_dblock(dblock);
			li->discard_end = li->discard_begin + 1ULL;
		}
	} else {
		set_discard_range(li);
		li->discard_begin = li->discard_end = 0;
	}

	return 0;
}
static dm_cblock_t get_cache_dev_size(struct cache *cache)
{
	sector_t size = get_dev_size(cache->cache_dev);
	(void) sector_div(size, cache->sectors_per_block);
	return to_cblock(size);
}

static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
	if (from_cblock(new_size) > from_cblock(cache->cache_size))
		return true;

	/*
	 * We can't drop a dirty block when shrinking the cache.  Check
	 * each block that would be dropped, starting with new_size
	 * itself, before advancing the index; incrementing first would
	 * skip the first dropped block and read one past the bitset.
	 */
	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
		if (is_dirty(cache, new_size)) {
			DMERR("unable to shrink cache; cache block %llu is dirty",
			      (unsigned long long) from_cblock(new_size));
			return false;
		}
		new_size = to_cblock(from_cblock(new_size) + 1);
	}

	return true;
}
static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{
	int r;

	r = dm_cache_resize(cache->cmd, new_size);
	if (r) {
		DMERR("could not resize cache metadata");
		return r;
	}

	set_cache_size(cache, new_size);

	return 0;
}
static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	dm_cblock_t csize = get_cache_dev_size(cache);

	/*
	 * Check to see if the cache device has been resized.
	 */
	if (!cache->sized) {
		r = resize_cache_dev(cache, csize);
		if (r)
			return r;

		cache->sized = true;

	} else if (csize != cache->cache_size) {
		if (!can_resize(cache, csize))
			return -EINVAL;

		r = resize_cache_dev(cache, csize);
		if (r)
			return r;
	}

	if (!cache->loaded_mappings) {
		r = dm_cache_load_mappings(cache->cmd, cache->policy,
					   load_mapping, cache);
		if (r) {
			DMERR("could not load cache mappings");
			return r;
		}

		cache->loaded_mappings = true;
	}

	if (!cache->loaded_discards) {
		struct discard_load_info li;

		/*
		 * The discard bitset could have been resized, or the
		 * discard block size changed.  To be safe we start by
		 * setting every dblock to not discarded.
		 */
		clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

		discard_load_info_init(cache, &li);
		r = dm_cache_load_discards(cache->cmd, load_discard, &li);
		if (r) {
			DMERR("could not load origin discards");
			return r;
		}
		set_discard_range(&li);

		cache->loaded_discards = true;
	}

	return r;
}
static void cache_resume(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	cache->need_tick_bio = true;
	do_waker(&cache->waker.work);
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <cache block size> <#used cache blocks>/<#total cache blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <policy name> <#policy args> <policy args>*
 */
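
/*
 * An illustrative STATUSTYPE_INFO line (all values made up) following the
 * format above:
 *
 *   8 27/2048 512 583/4096 1024 17 2301 78 3 7 41 1 writeback \
 *   2 migration_threshold 2048 mq 0
 */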
static void cache_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	int r = 0;
	unsigned i;
	ssize_t sz = 0;
	dm_block_t nr_free_blocks_metadata = 0;
	dm_block_t nr_blocks_metadata = 0;
	char buf[BDEVNAME_SIZE];
	struct cache *cache = ti->private;
	dm_cblock_t residency;

	switch (type) {
	case STATUSTYPE_INFO:
		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
			r = dm_cache_commit(cache->cmd, false);
			if (r)
				DMERR("could not commit metadata for accurate status");
		}

		r = dm_cache_get_free_metadata_block_count(cache->cmd,
							   &nr_free_blocks_metadata);
		if (r) {
			DMERR("could not get metadata free block count");
			goto err;
		}

		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
		if (r) {
			DMERR("could not get metadata device size");
			goto err;
		}

		residency = policy_residency(cache->policy);

		DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
		       (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       cache->sectors_per_block,
		       (unsigned long long) from_cblock(residency),
		       (unsigned long long) from_cblock(cache->cache_size),
		       (unsigned) atomic_read(&cache->stats.read_hit),
		       (unsigned) atomic_read(&cache->stats.read_miss),
		       (unsigned) atomic_read(&cache->stats.write_hit),
		       (unsigned) atomic_read(&cache->stats.write_miss),
		       (unsigned) atomic_read(&cache->stats.demotion),
		       (unsigned) atomic_read(&cache->stats.promotion),
		       (unsigned long) atomic_read(&cache->nr_dirty));

		if (writethrough_mode(&cache->features))
			DMEMIT("1 writethrough ");

		else if (passthrough_mode(&cache->features))
			DMEMIT("1 passthrough ");

		else if (writeback_mode(&cache->features))
			DMEMIT("1 writeback ");

		else {
			DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode);
			goto err;
		}

		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);

		DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
		if (sz < maxlen) {
			r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
			if (r)
				DMERR("policy_emit_config_values returned %d", r);
		}

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < cache->nr_ctr_args - 1; i++)
			DMEMIT(" %s", cache->ctr_args[i]);
		if (cache->nr_ctr_args)
			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
	}

	return;

err:
	DMEMIT("Error");
}
/*
 * A cache block range can take two forms:
 *
 * i) A single cblock, eg. '3456'
 * ii) A begin and end cblock with a dash between, eg. 123-234
 */
static int parse_cblock_range(struct cache *cache, const char *str,
			      struct cblock_range *result)
{
	char dummy;
	uint64_t b, e;
	int r;

	/*
	 * Try and parse form (ii) first.
	 */
	r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
	if (r < 0)
		return r;

	if (r == 2) {
		result->begin = to_cblock(b);
		result->end = to_cblock(e);
		return 0;
	}

	/*
	 * That didn't work, try form (i).
	 */
	r = sscanf(str, "%llu%c", &b, &dummy);
	if (r < 0)
		return r;

	if (r == 1) {
		result->begin = to_cblock(b);
		result->end = to_cblock(from_cblock(result->begin) + 1u);
		return 0;
	}

	DMERR("invalid cblock range '%s'", str);
	return -EINVAL;
}
static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
{
	uint64_t b = from_cblock(range->begin);
	uint64_t e = from_cblock(range->end);
	uint64_t n = from_cblock(cache->cache_size);

	if (b >= n) {
		DMERR("begin cblock out of range: %llu >= %llu", b, n);
		return -EINVAL;
	}

	if (e > n) {
		DMERR("end cblock out of range: %llu > %llu", e, n);
		return -EINVAL;
	}

	if (b >= e) {
		DMERR("invalid cblock range: %llu >= %llu", b, e);
		return -EINVAL;
	}

	return 0;
}

static int request_invalidation(struct cache *cache, struct cblock_range *range)
{
	struct invalidation_request req;

	INIT_LIST_HEAD(&req.list);
	req.cblocks = range;
	atomic_set(&req.complete, 0);
	req.err = 0;
	init_waitqueue_head(&req.result_wait);

	spin_lock(&cache->invalidation_lock);
	list_add(&req.list, &cache->invalidation_requests);
	spin_unlock(&cache->invalidation_lock);
	wake_worker(cache);

	wait_event(req.result_wait, atomic_read(&req.complete));
	return req.err;
}
static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
					      const char **cblock_ranges)
{
	int r = 0;
	unsigned i;
	struct cblock_range range;

	if (!passthrough_mode(&cache->features)) {
		DMERR("cache has to be in passthrough mode for invalidation");
		return -EPERM;
	}

	for (i = 0; i < count; i++) {
		r = parse_cblock_range(cache, cblock_ranges[i], &range);
		if (r)
			break;

		r = validate_cblock_range(cache, &range);
		if (r)
			break;

		/*
		 * Pass begin and end origin blocks to the worker and wake it.
		 */
		r = request_invalidation(cache, &range);
		if (r)
			break;
	}

	return r;
}

/*
 * Supports
 *	"<key> <value>"
 * and
 *	"invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
 *
 * The key migration_threshold is supported by the cache target core.
 */
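
/*
 * For example (the device name is illustrative):
 *
 *   dmsetup message my-cache 0 migration_threshold 4096
 *   dmsetup message my-cache 0 invalidate_cblocks 3456 7000-7200
 */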
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct cache *cache = ti->private;

	if (!argc)
		return -EINVAL;

	if (!strcasecmp(argv[0], "invalidate_cblocks"))
		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);

	if (argc != 2)
		return -EINVAL;

	return set_config_value(cache, argv[0], argv[1]);
}

static int cache_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int r = 0;
	struct cache *cache = ti->private;

	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
	if (!r)
		r = fn(ti, cache->origin_dev, 0, ti->len, data);

	return r;
}
/*
 * We assume I/O is going to the origin (which is the volume
 * more likely to have restrictions e.g. by being striped).
 * (Looking up the exact location of the data would be expensive
 * and could always be out of date by the time the bio is submitted.)
 */
static int cache_bvec_merge(struct dm_target *ti,
			    struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size)
{
	struct cache *cache = ti->private;
	struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cache->origin_dev->bdev;
	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
	/*
	 * FIXME: these limits may be incompatible with the cache device
	 */
	limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
					    cache->origin_sectors);
	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
}

static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct cache *cache = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with the
	 * cache's blocksize (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < cache->sectors_per_block ||
	    do_div(io_opt_sectors, cache->sectors_per_block)) {
		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
	}
	set_discard_limits(cache, limits);
}
/*----------------------------------------------------------------*/

static struct target_type cache_target = {
	.name = "cache",
	.version = {1, 6, 0},
	.module = THIS_MODULE,
	.ctr = cache_ctr,
	.dtr = cache_dtr,
	.map = cache_map,
	.end_io = cache_end_io,
	.postsuspend = cache_postsuspend,
	.preresume = cache_preresume,
	.resume = cache_resume,
	.status = cache_status,
	.message = cache_message,
	.iterate_devices = cache_iterate_devices,
	.merge = cache_bvec_merge,
	.io_hints = cache_io_hints,
};

static int __init dm_cache_init(void)
{
	int r;

	r = dm_register_target(&cache_target);
	if (r) {
		DMERR("cache target registration failed: %d", r);
		return r;
	}

	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
	if (!migration_cache) {
		dm_unregister_target(&cache_target);
		return -ENOMEM;
	}

	return 0;
}

static void __exit dm_cache_exit(void)
{
	dm_unregister_target(&cache_target);
	kmem_cache_destroy(migration_cache);
}

module_init(dm_cache_init);
module_exit(dm_cache_exit);

MODULE_DESCRIPTION(DM_NAME " cache target");
MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
MODULE_LICENSE("GPL");