dm-snap.c

  1. /*
  2. * dm-snapshot.c
  3. *
  4. * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
  5. *
  6. * This file is released under the GPL.
  7. */
  8. #include <linux/blkdev.h>
  9. #include <linux/device-mapper.h>
  10. #include <linux/delay.h>
  11. #include <linux/fs.h>
  12. #include <linux/init.h>
  13. #include <linux/kdev_t.h>
  14. #include <linux/list.h>
  15. #include <linux/mempool.h>
  16. #include <linux/module.h>
  17. #include <linux/slab.h>
  18. #include <linux/vmalloc.h>
  19. #include <linux/log2.h>
  20. #include <linux/dm-kcopyd.h>
  21. #include "dm.h"
  22. #include "dm-exception-store.h"
  23. #define DM_MSG_PREFIX "snapshots"
  24. static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
  25. #define dm_target_is_snapshot_merge(ti) \
  26. ((ti)->type->name == dm_snapshot_merge_target_name)
  27. /*
  28. * The size of the mempool used to track chunks in use.
  29. */
  30. #define MIN_IOS 256
  31. #define DM_TRACKED_CHUNK_HASH_SIZE 16
  32. #define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \
  33. (DM_TRACKED_CHUNK_HASH_SIZE - 1))
  34. struct dm_exception_table {
  35. uint32_t hash_mask;
  36. unsigned hash_shift;
  37. struct list_head *table;
  38. };
  39. struct dm_snapshot {
  40. struct rw_semaphore lock;
  41. struct dm_dev *origin;
  42. struct dm_dev *cow;
  43. struct dm_target *ti;
  44. /* List of snapshots per Origin */
  45. struct list_head list;
  46. /*
  47. * You can't use a snapshot if this is 0 (e.g. if full).
  48. * A snapshot-merge target never clears this.
  49. */
  50. int valid;
  51. /*
  52. * The snapshot overflowed because of a write to the snapshot device.
  53. * We don't have to invalidate the snapshot in this case, but we need
  54. * to prevent further writes.
  55. */
  56. int snapshot_overflowed;
  57. /* Origin writes don't trigger exceptions until this is set */
  58. int active;
  59. atomic_t pending_exceptions_count;
  60. /* Protected by "lock" */
  61. sector_t exception_start_sequence;
  62. /* Protected by kcopyd single-threaded callback */
  63. sector_t exception_complete_sequence;
  64. /*
  65. * A list of pending exceptions that completed out of order.
  66. * Protected by kcopyd single-threaded callback.
  67. */
  68. struct list_head out_of_order_list;
  69. mempool_t *pending_pool;
  70. struct dm_exception_table pending;
  71. struct dm_exception_table complete;
  72. /*
  73. * pe_lock protects all pending_exception operations and access
  74. * as well as the snapshot_bios list.
  75. */
  76. spinlock_t pe_lock;
  77. /* Chunks with outstanding reads */
  78. spinlock_t tracked_chunk_lock;
  79. struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
  80. /* The on disk metadata handler */
  81. struct dm_exception_store *store;
  82. struct dm_kcopyd_client *kcopyd_client;
  83. /* Wait for events based on state_bits */
  84. unsigned long state_bits;
  85. /* Range of chunks currently being merged. */
  86. chunk_t first_merging_chunk;
  87. int num_merging_chunks;
  88. /*
  89. * The merge operation failed if this flag is set.
  90. * Failure modes are handled as follows:
  91. * - I/O error reading the header
  92. * => don't load the target; abort.
  93. * - Header does not have "valid" flag set
  94. * => use the origin; forget about the snapshot.
  95. * - I/O error when reading exceptions
  96. * => don't load the target; abort.
  97. * (We can't use the intermediate origin state.)
  98. * - I/O error while merging
  99. * => stop merging; set merge_failed; process I/O normally.
  100. */
  101. int merge_failed;
  102. /*
  103. * Incoming bios that overlap with chunks being merged must wait
  104. * for them to be committed.
  105. */
  106. struct bio_list bios_queued_during_merge;
  107. };
  108. /*
  109. * state_bits:
  110. * RUNNING_MERGE - Merge operation is in progress.
  111. * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
  112. * cleared afterwards.
  113. */
  114. #define RUNNING_MERGE 0
  115. #define SHUTDOWN_MERGE 1
  116. DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
  117. "A percentage of time allocated for copy on write");
  118. struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
  119. {
  120. return s->origin;
  121. }
  122. EXPORT_SYMBOL(dm_snap_origin);
  123. struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
  124. {
  125. return s->cow;
  126. }
  127. EXPORT_SYMBOL(dm_snap_cow);
  128. static sector_t chunk_to_sector(struct dm_exception_store *store,
  129. chunk_t chunk)
  130. {
  131. return chunk << store->chunk_shift;
  132. }
  133. static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
  134. {
  135. /*
  136. * There is only ever one instance of a particular block
  137. * device so we can compare pointers safely.
  138. */
  139. return lhs == rhs;
  140. }
  141. struct dm_snap_pending_exception {
  142. struct dm_exception e;
  143. /*
  144. * Origin buffers waiting for this to complete are held
  145. * in a bio list
  146. */
  147. struct bio_list origin_bios;
  148. struct bio_list snapshot_bios;
  149. /* Pointer back to snapshot context */
  150. struct dm_snapshot *snap;
  151. /*
  152. * 1 indicates the exception has already been sent to
  153. * kcopyd.
  154. */
  155. int started;
  156. /* There was copying error. */
  157. int copy_error;
  158. /* A sequence number, it is used for in-order completion. */
  159. sector_t exception_sequence;
  160. struct list_head out_of_order_entry;
  161. /*
  162. * For writing a complete chunk, bypassing the copy.
  163. */
  164. struct bio *full_bio;
  165. bio_end_io_t *full_bio_end_io;
  166. void *full_bio_private;
  167. };
  168. /*
  169. * Hash table mapping origin volumes to lists of snapshots and
  170. * a lock to protect it
  171. */
  172. static struct kmem_cache *exception_cache;
  173. static struct kmem_cache *pending_cache;
  174. struct dm_snap_tracked_chunk {
  175. struct hlist_node node;
  176. chunk_t chunk;
  177. };
  178. static void init_tracked_chunk(struct bio *bio)
  179. {
  180. struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
  181. INIT_HLIST_NODE(&c->node);
  182. }
  183. static bool is_bio_tracked(struct bio *bio)
  184. {
  185. struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
  186. return !hlist_unhashed(&c->node);
  187. }
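/*
 * Note the chunk this bio touches so that __check_for_conflicting_io() can
 * wait for outstanding reads on it before an exception for that chunk is
 * completed or merged.
 */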
  188. static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
  189. {
  190. struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
  191. c->chunk = chunk;
  192. spin_lock_irq(&s->tracked_chunk_lock);
  193. hlist_add_head(&c->node,
  194. &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
  195. spin_unlock_irq(&s->tracked_chunk_lock);
  196. }
  197. static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
  198. {
  199. struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
  200. unsigned long flags;
  201. spin_lock_irqsave(&s->tracked_chunk_lock, flags);
  202. hlist_del(&c->node);
  203. spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
  204. }
  205. static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
  206. {
  207. struct dm_snap_tracked_chunk *c;
  208. int found = 0;
  209. spin_lock_irq(&s->tracked_chunk_lock);
  210. hlist_for_each_entry(c,
  211. &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
  212. if (c->chunk == chunk) {
  213. found = 1;
  214. break;
  215. }
  216. }
  217. spin_unlock_irq(&s->tracked_chunk_lock);
  218. return found;
  219. }
  220. /*
  221. * This conflicting I/O is extremely improbable in the caller,
  222. * so msleep(1) is sufficient and there is no need for a wait queue.
  223. */
  224. static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
  225. {
  226. while (__chunk_is_tracked(s, chunk))
  227. msleep(1);
  228. }
  229. /*
  230. * One of these per registered origin, held in the snapshot_origins hash
  231. */
  232. struct origin {
  233. /* The origin device */
  234. struct block_device *bdev;
  235. struct list_head hash_list;
  236. /* List of snapshots for this origin */
  237. struct list_head snapshots;
  238. };
  239. /*
  240. * This structure is allocated for each origin target
  241. */
  242. struct dm_origin {
  243. struct dm_dev *dev;
  244. struct dm_target *ti;
  245. unsigned split_boundary;
  246. struct list_head hash_list;
  247. };
  248. /*
  249. * Size of the hash table for origin volumes. If we make this
  250. * the size of the minors list then it should be nearly perfect
  251. */
  252. #define ORIGIN_HASH_SIZE 256
  253. #define ORIGIN_MASK 0xFF
  254. static struct list_head *_origins;
  255. static struct list_head *_dm_origins;
  256. static struct rw_semaphore _origins_lock;
  257. static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
  258. static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
  259. static uint64_t _pending_exceptions_done_count;
  260. static int init_origin_hash(void)
  261. {
  262. int i;
  263. _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
  264. GFP_KERNEL);
  265. if (!_origins) {
  266. DMERR("unable to allocate memory for _origins");
  267. return -ENOMEM;
  268. }
  269. for (i = 0; i < ORIGIN_HASH_SIZE; i++)
  270. INIT_LIST_HEAD(_origins + i);
  271. _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
  272. GFP_KERNEL);
  273. if (!_dm_origins) {
  274. DMERR("unable to allocate memory for _dm_origins");
  275. kfree(_origins);
  276. return -ENOMEM;
  277. }
  278. for (i = 0; i < ORIGIN_HASH_SIZE; i++)
  279. INIT_LIST_HEAD(_dm_origins + i);
  280. init_rwsem(&_origins_lock);
  281. return 0;
  282. }
  283. static void exit_origin_hash(void)
  284. {
  285. kfree(_origins);
  286. kfree(_dm_origins);
  287. }
  288. static unsigned origin_hash(struct block_device *bdev)
  289. {
  290. return bdev->bd_dev & ORIGIN_MASK;
  291. }
  292. static struct origin *__lookup_origin(struct block_device *origin)
  293. {
  294. struct list_head *ol;
  295. struct origin *o;
  296. ol = &_origins[origin_hash(origin)];
  297. list_for_each_entry (o, ol, hash_list)
  298. if (bdev_equal(o->bdev, origin))
  299. return o;
  300. return NULL;
  301. }
  302. static void __insert_origin(struct origin *o)
  303. {
  304. struct list_head *sl = &_origins[origin_hash(o->bdev)];
  305. list_add_tail(&o->hash_list, sl);
  306. }
  307. static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
  308. {
  309. struct list_head *ol;
  310. struct dm_origin *o;
  311. ol = &_dm_origins[origin_hash(origin)];
  312. list_for_each_entry (o, ol, hash_list)
  313. if (bdev_equal(o->dev->bdev, origin))
  314. return o;
  315. return NULL;
  316. }
  317. static void __insert_dm_origin(struct dm_origin *o)
  318. {
  319. struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
  320. list_add_tail(&o->hash_list, sl);
  321. }
  322. static void __remove_dm_origin(struct dm_origin *o)
  323. {
  324. list_del(&o->hash_list);
  325. }
  326. /*
  327. * _origins_lock must be held when calling this function.
  328. * Returns number of snapshots registered using the supplied cow device, plus:
  329. * snap_src - a snapshot suitable for use as a source of exception handover
  330. * snap_dest - a snapshot capable of receiving exception handover.
  331. * snap_merge - an existing snapshot-merge target linked to the same origin.
  332. * There can be at most one snapshot-merge target. The parameter is optional.
  333. *
  334. * Possible return values and states of snap_src and snap_dest.
  335. * 0: NULL, NULL - first new snapshot
  336. * 1: snap_src, NULL - normal snapshot
  337. * 2: snap_src, snap_dest - waiting for handover
  338. * 2: snap_src, NULL - handed over, waiting for old to be deleted
  339. * 1: NULL, snap_dest - source got destroyed without handover
  340. */
  341. static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
  342. struct dm_snapshot **snap_src,
  343. struct dm_snapshot **snap_dest,
  344. struct dm_snapshot **snap_merge)
  345. {
  346. struct dm_snapshot *s;
  347. struct origin *o;
  348. int count = 0;
  349. int active;
  350. o = __lookup_origin(snap->origin->bdev);
  351. if (!o)
  352. goto out;
  353. list_for_each_entry(s, &o->snapshots, list) {
  354. if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
  355. *snap_merge = s;
  356. if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
  357. continue;
  358. down_read(&s->lock);
  359. active = s->active;
  360. up_read(&s->lock);
  361. if (active) {
  362. if (snap_src)
  363. *snap_src = s;
  364. } else if (snap_dest)
  365. *snap_dest = s;
  366. count++;
  367. }
  368. out:
  369. return count;
  370. }
  371. /*
  372. * On success, returns 1 if this snapshot is a handover destination,
  373. * otherwise returns 0.
  374. */
  375. static int __validate_exception_handover(struct dm_snapshot *snap)
  376. {
  377. struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
  378. struct dm_snapshot *snap_merge = NULL;
  379. /* Does snapshot need exceptions handed over to it? */
  380. if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
  381. &snap_merge) == 2) ||
  382. snap_dest) {
  383. snap->ti->error = "Snapshot cow pairing for exception "
  384. "table handover failed";
  385. return -EINVAL;
  386. }
  387. /*
  388. * If no snap_src was found, snap cannot become a handover
  389. * destination.
  390. */
  391. if (!snap_src)
  392. return 0;
  393. /*
  394. * Non-snapshot-merge handover?
  395. */
  396. if (!dm_target_is_snapshot_merge(snap->ti))
  397. return 1;
  398. /*
  399. * Do not allow more than one merging snapshot.
  400. */
  401. if (snap_merge) {
  402. snap->ti->error = "A snapshot is already merging.";
  403. return -EINVAL;
  404. }
  405. if (!snap_src->store->type->prepare_merge ||
  406. !snap_src->store->type->commit_merge) {
  407. snap->ti->error = "Snapshot exception store does not "
  408. "support snapshot-merge.";
  409. return -EINVAL;
  410. }
  411. return 1;
  412. }
  413. static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
  414. {
  415. struct dm_snapshot *l;
  416. /* Sort the list according to chunk size, largest-first smallest-last */
  417. list_for_each_entry(l, &o->snapshots, list)
  418. if (l->store->chunk_size < s->store->chunk_size)
  419. break;
  420. list_add_tail(&s->list, &l->list);
  421. }
  422. /*
  423. * Make a note of the snapshot and its origin so we can look it
  424. * up when the origin has a write on it.
  425. *
  426. * Also validate snapshot exception store handovers.
  427. * On success, returns 1 if this registration is a handover destination,
  428. * otherwise returns 0.
  429. */
  430. static int register_snapshot(struct dm_snapshot *snap)
  431. {
  432. struct origin *o, *new_o = NULL;
  433. struct block_device *bdev = snap->origin->bdev;
  434. int r = 0;
  435. new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
  436. if (!new_o)
  437. return -ENOMEM;
  438. down_write(&_origins_lock);
  439. r = __validate_exception_handover(snap);
  440. if (r < 0) {
  441. kfree(new_o);
  442. goto out;
  443. }
  444. o = __lookup_origin(bdev);
  445. if (o)
  446. kfree(new_o);
  447. else {
  448. /* New origin */
  449. o = new_o;
  450. /* Initialise the struct */
  451. INIT_LIST_HEAD(&o->snapshots);
  452. o->bdev = bdev;
  453. __insert_origin(o);
  454. }
  455. __insert_snapshot(o, snap);
  456. out:
  457. up_write(&_origins_lock);
  458. return r;
  459. }
  460. /*
  461. * Move snapshot to correct place in list according to chunk size.
  462. */
  463. static void reregister_snapshot(struct dm_snapshot *s)
  464. {
  465. struct block_device *bdev = s->origin->bdev;
  466. down_write(&_origins_lock);
  467. list_del(&s->list);
  468. __insert_snapshot(__lookup_origin(bdev), s);
  469. up_write(&_origins_lock);
  470. }
  471. static void unregister_snapshot(struct dm_snapshot *s)
  472. {
  473. struct origin *o;
  474. down_write(&_origins_lock);
  475. o = __lookup_origin(s->origin->bdev);
  476. list_del(&s->list);
  477. if (o && list_empty(&o->snapshots)) {
  478. list_del(&o->hash_list);
  479. kfree(o);
  480. }
  481. up_write(&_origins_lock);
  482. }
  483. /*
  484. * Implementation of the exception hash tables.
  485. * The lowest hash_shift bits of the chunk number are ignored, allowing
  486. * some consecutive chunks to be grouped together.
  487. */
  488. static int dm_exception_table_init(struct dm_exception_table *et,
  489. uint32_t size, unsigned hash_shift)
  490. {
  491. unsigned int i;
  492. et->hash_shift = hash_shift;
  493. et->hash_mask = size - 1;
  494. et->table = dm_vcalloc(size, sizeof(struct list_head));
  495. if (!et->table)
  496. return -ENOMEM;
  497. for (i = 0; i < size; i++)
  498. INIT_LIST_HEAD(et->table + i);
  499. return 0;
  500. }
  501. static void dm_exception_table_exit(struct dm_exception_table *et,
  502. struct kmem_cache *mem)
  503. {
  504. struct list_head *slot;
  505. struct dm_exception *ex, *next;
  506. int i, size;
  507. size = et->hash_mask + 1;
  508. for (i = 0; i < size; i++) {
  509. slot = et->table + i;
  510. list_for_each_entry_safe (ex, next, slot, hash_list)
  511. kmem_cache_free(mem, ex);
  512. }
  513. vfree(et->table);
  514. }
  515. static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
  516. {
  517. return (chunk >> et->hash_shift) & et->hash_mask;
  518. }
  519. static void dm_remove_exception(struct dm_exception *e)
  520. {
  521. list_del(&e->hash_list);
  522. }
  523. /*
  524. * Return the exception data for a sector, or NULL if not
  525. * remapped.
  526. */
  527. static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
  528. chunk_t chunk)
  529. {
  530. struct list_head *slot;
  531. struct dm_exception *e;
  532. slot = &et->table[exception_hash(et, chunk)];
  533. list_for_each_entry (e, slot, hash_list)
  534. if (chunk >= e->old_chunk &&
  535. chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
  536. return e;
  537. return NULL;
  538. }
  539. static struct dm_exception *alloc_completed_exception(gfp_t gfp)
  540. {
  541. struct dm_exception *e;
  542. e = kmem_cache_alloc(exception_cache, gfp);
  543. if (!e && gfp == GFP_NOIO)
  544. e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
  545. return e;
  546. }
  547. static void free_completed_exception(struct dm_exception *e)
  548. {
  549. kmem_cache_free(exception_cache, e);
  550. }
  551. static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
  552. {
  553. struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
  554. GFP_NOIO);
  555. atomic_inc(&s->pending_exceptions_count);
  556. pe->snap = s;
  557. return pe;
  558. }
  559. static void free_pending_exception(struct dm_snap_pending_exception *pe)
  560. {
  561. struct dm_snapshot *s = pe->snap;
  562. mempool_free(pe, s->pending_pool);
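/*
 * The barrier below orders the mempool_free() before the atomic_dec(),
 * pairing with the smp_mb() in snapshot_dtr() that separates the
 * pending_exceptions_count check from mempool_destroy().
 */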
  563. smp_mb__before_atomic();
  564. atomic_dec(&s->pending_exceptions_count);
  565. }
  566. static void dm_insert_exception(struct dm_exception_table *eh,
  567. struct dm_exception *new_e)
  568. {
  569. struct list_head *l;
  570. struct dm_exception *e = NULL;
  571. l = &eh->table[exception_hash(eh, new_e->old_chunk)];
  572. /* Add immediately if this table doesn't support consecutive chunks */
  573. if (!eh->hash_shift)
  574. goto out;
  575. /* List is ordered by old_chunk */
  576. list_for_each_entry_reverse(e, l, hash_list) {
  577. /* Insert after an existing chunk? */
  578. if (new_e->old_chunk == (e->old_chunk +
  579. dm_consecutive_chunk_count(e) + 1) &&
  580. new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
  581. dm_consecutive_chunk_count(e) + 1)) {
  582. dm_consecutive_chunk_count_inc(e);
  583. free_completed_exception(new_e);
  584. return;
  585. }
  586. /* Insert before an existing chunk? */
  587. if (new_e->old_chunk == (e->old_chunk - 1) &&
  588. new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
  589. dm_consecutive_chunk_count_inc(e);
  590. e->old_chunk--;
  591. e->new_chunk--;
  592. free_completed_exception(new_e);
  593. return;
  594. }
  595. if (new_e->old_chunk > e->old_chunk)
  596. break;
  597. }
  598. out:
  599. list_add(&new_e->hash_list, e ? &e->hash_list : l);
  600. }
  601. /*
  602. * Callback used by the exception stores to load exceptions when
  603. * initialising.
  604. */
  605. static int dm_add_exception(void *context, chunk_t old, chunk_t new)
  606. {
  607. struct dm_snapshot *s = context;
  608. struct dm_exception *e;
  609. e = alloc_completed_exception(GFP_KERNEL);
  610. if (!e)
  611. return -ENOMEM;
  612. e->old_chunk = old;
  613. /* Consecutive_count is implicitly initialised to zero */
  614. e->new_chunk = new;
  615. dm_insert_exception(&s->complete, e);
  616. return 0;
  617. }
  618. /*
  619. * Return a minimum chunk size of all snapshots that have the specified origin.
  620. * Return zero if the origin has no snapshots.
  621. */
  622. static uint32_t __minimum_chunk_size(struct origin *o)
  623. {
  624. struct dm_snapshot *snap;
  625. unsigned chunk_size = 0;
  626. if (o)
  627. list_for_each_entry(snap, &o->snapshots, list)
  628. chunk_size = min_not_zero(chunk_size,
  629. snap->store->chunk_size);
  630. return (uint32_t) chunk_size;
  631. }
  632. /*
  633. * Hard coded magic.
  634. */
  635. static int calc_max_buckets(void)
  636. {
  637. /* use a fixed size of 2MB */
  638. unsigned long mem = 2 * 1024 * 1024;
  639. mem /= sizeof(struct list_head);
  640. return mem;
  641. }
  642. /*
  643. * Allocate room for a suitable hash table.
  644. */
  645. static int init_hash_tables(struct dm_snapshot *s)
  646. {
  647. sector_t hash_size, cow_dev_size, max_buckets;
  648. /*
  649. * Calculate based on the size of the original volume or
  650. * the COW volume...
  651. */
  652. cow_dev_size = get_dev_size(s->cow->bdev);
  653. max_buckets = calc_max_buckets();
  654. hash_size = cow_dev_size >> s->store->chunk_shift;
  655. hash_size = min(hash_size, max_buckets);
  656. if (hash_size < 64)
  657. hash_size = 64;
  658. hash_size = rounddown_pow_of_two(hash_size);
  659. if (dm_exception_table_init(&s->complete, hash_size,
  660. DM_CHUNK_CONSECUTIVE_BITS))
  661. return -ENOMEM;
  662. /*
  663. * Allocate hash table for in-flight exceptions
  664. * Make this smaller than the real hash table
  665. */
  666. hash_size >>= 3;
  667. if (hash_size < 64)
  668. hash_size = 64;
  669. if (dm_exception_table_init(&s->pending, hash_size, 0)) {
  670. dm_exception_table_exit(&s->complete, exception_cache);
  671. return -ENOMEM;
  672. }
  673. return 0;
  674. }
  675. static void merge_shutdown(struct dm_snapshot *s)
  676. {
  677. clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
  678. smp_mb__after_atomic();
  679. wake_up_bit(&s->state_bits, RUNNING_MERGE);
  680. }
  681. static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
  682. {
  683. s->first_merging_chunk = 0;
  684. s->num_merging_chunks = 0;
  685. return bio_list_get(&s->bios_queued_during_merge);
  686. }
  687. /*
  688. * Remove one chunk from the index of completed exceptions.
  689. */
  690. static int __remove_single_exception_chunk(struct dm_snapshot *s,
  691. chunk_t old_chunk)
  692. {
  693. struct dm_exception *e;
  694. e = dm_lookup_exception(&s->complete, old_chunk);
  695. if (!e) {
  696. DMERR("Corruption detected: exception for block %llu is "
  697. "on disk but not in memory",
  698. (unsigned long long)old_chunk);
  699. return -EINVAL;
  700. }
  701. /*
  702. * If this is the only chunk using this exception, remove exception.
  703. */
  704. if (!dm_consecutive_chunk_count(e)) {
  705. dm_remove_exception(e);
  706. free_completed_exception(e);
  707. return 0;
  708. }
  709. /*
  710. * The chunk may be either at the beginning or the end of a
  711. * group of consecutive chunks - never in the middle. We are
  712. * removing chunks in the opposite order to that in which they
  713. * were added, so this should always be true.
  714. * Decrement the consecutive chunk counter and adjust the
  715. * starting point if necessary.
  716. */
  717. if (old_chunk == e->old_chunk) {
  718. e->old_chunk++;
  719. e->new_chunk++;
  720. } else if (old_chunk != e->old_chunk +
  721. dm_consecutive_chunk_count(e)) {
  722. DMERR("Attempt to merge block %llu from the "
  723. "middle of a chunk range [%llu - %llu]",
  724. (unsigned long long)old_chunk,
  725. (unsigned long long)e->old_chunk,
  726. (unsigned long long)
  727. e->old_chunk + dm_consecutive_chunk_count(e));
  728. return -EINVAL;
  729. }
  730. dm_consecutive_chunk_count_dec(e);
  731. return 0;
  732. }
  733. static void flush_bios(struct bio *bio);
  734. static int remove_single_exception_chunk(struct dm_snapshot *s)
  735. {
  736. struct bio *b = NULL;
  737. int r;
  738. chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
  739. down_write(&s->lock);
  740. /*
  741. * Process chunks (and associated exceptions) in reverse order
  742. * so that dm_consecutive_chunk_count_dec() accounting works.
  743. */
  744. do {
  745. r = __remove_single_exception_chunk(s, old_chunk);
  746. if (r)
  747. goto out;
  748. } while (old_chunk-- > s->first_merging_chunk);
  749. b = __release_queued_bios_after_merge(s);
  750. out:
  751. up_write(&s->lock);
  752. if (b)
  753. flush_bios(b);
  754. return r;
  755. }
  756. static int origin_write_extent(struct dm_snapshot *merging_snap,
  757. sector_t sector, unsigned chunk_size);
  758. static void merge_callback(int read_err, unsigned long write_err,
  759. void *context);
  760. static uint64_t read_pending_exceptions_done_count(void)
  761. {
  762. uint64_t pending_exceptions_done;
  763. spin_lock(&_pending_exceptions_done_spinlock);
  764. pending_exceptions_done = _pending_exceptions_done_count;
  765. spin_unlock(&_pending_exceptions_done_spinlock);
  766. return pending_exceptions_done;
  767. }
  768. static void increment_pending_exceptions_done_count(void)
  769. {
  770. spin_lock(&_pending_exceptions_done_spinlock);
  771. _pending_exceptions_done_count++;
  772. spin_unlock(&_pending_exceptions_done_spinlock);
  773. wake_up_all(&_pending_exceptions_done);
  774. }
  775. static void snapshot_merge_next_chunks(struct dm_snapshot *s)
  776. {
  777. int i, linear_chunks;
  778. chunk_t old_chunk, new_chunk;
  779. struct dm_io_region src, dest;
  780. sector_t io_size;
  781. uint64_t previous_count;
  782. BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
  783. if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
  784. goto shut;
  785. /*
  786. * valid flag never changes during merge, so no lock required.
  787. */
  788. if (!s->valid) {
  789. DMERR("Snapshot is invalid: can't merge");
  790. goto shut;
  791. }
  792. linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
  793. &new_chunk);
  794. if (linear_chunks <= 0) {
  795. if (linear_chunks < 0) {
  796. DMERR("Read error in exception store: "
  797. "shutting down merge");
  798. down_write(&s->lock);
  799. s->merge_failed = 1;
  800. up_write(&s->lock);
  801. }
  802. goto shut;
  803. }
  804. /* Adjust old_chunk and new_chunk to reflect start of linear region */
  805. old_chunk = old_chunk + 1 - linear_chunks;
  806. new_chunk = new_chunk + 1 - linear_chunks;
  807. /*
  808. * Use one (potentially large) I/O to copy all 'linear_chunks'
  809. * from the exception store to the origin
  810. */
  811. io_size = linear_chunks * s->store->chunk_size;
  812. dest.bdev = s->origin->bdev;
  813. dest.sector = chunk_to_sector(s->store, old_chunk);
  814. dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
  815. src.bdev = s->cow->bdev;
  816. src.sector = chunk_to_sector(s->store, new_chunk);
  817. src.count = dest.count;
  818. /*
  819. * Reallocate any exceptions needed in other snapshots then
  820. * wait for the pending exceptions to complete.
  821. * Each time any pending exception (globally on the system)
  822. * completes we are woken and repeat the process to find out
  823. * if we can proceed. While this may not seem a particularly
  824. * efficient algorithm, it is not expected to have any
  825. * significant impact on performance.
  826. */
  827. previous_count = read_pending_exceptions_done_count();
  828. while (origin_write_extent(s, dest.sector, io_size)) {
  829. wait_event(_pending_exceptions_done,
  830. (read_pending_exceptions_done_count() !=
  831. previous_count));
  832. /* Retry after the wait, until all exceptions are done. */
  833. previous_count = read_pending_exceptions_done_count();
  834. }
  835. down_write(&s->lock);
  836. s->first_merging_chunk = old_chunk;
  837. s->num_merging_chunks = linear_chunks;
  838. up_write(&s->lock);
  839. /* Wait until writes to all 'linear_chunks' drain */
  840. for (i = 0; i < linear_chunks; i++)
  841. __check_for_conflicting_io(s, old_chunk + i);
  842. dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
  843. return;
  844. shut:
  845. merge_shutdown(s);
  846. }
  847. static void error_bios(struct bio *bio);
  848. static void merge_callback(int read_err, unsigned long write_err, void *context)
  849. {
  850. struct dm_snapshot *s = context;
  851. struct bio *b = NULL;
  852. if (read_err || write_err) {
  853. if (read_err)
  854. DMERR("Read error: shutting down merge.");
  855. else
  856. DMERR("Write error: shutting down merge.");
  857. goto shut;
  858. }
  859. if (s->store->type->commit_merge(s->store,
  860. s->num_merging_chunks) < 0) {
  861. DMERR("Write error in exception store: shutting down merge");
  862. goto shut;
  863. }
  864. if (remove_single_exception_chunk(s) < 0)
  865. goto shut;
  866. snapshot_merge_next_chunks(s);
  867. return;
  868. shut:
  869. down_write(&s->lock);
  870. s->merge_failed = 1;
  871. b = __release_queued_bios_after_merge(s);
  872. up_write(&s->lock);
  873. error_bios(b);
  874. merge_shutdown(s);
  875. }
  876. static void start_merge(struct dm_snapshot *s)
  877. {
  878. if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
  879. snapshot_merge_next_chunks(s);
  880. }
  881. /*
  882. * Stop the merging process and wait until it finishes.
  883. */
  884. static void stop_merge(struct dm_snapshot *s)
  885. {
  886. set_bit(SHUTDOWN_MERGE, &s->state_bits);
  887. wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
  888. clear_bit(SHUTDOWN_MERGE, &s->state_bits);
  889. }
  890. /*
  891. * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
  892. */
  893. static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
  894. {
  895. struct dm_snapshot *s;
  896. int i;
  897. int r = -EINVAL;
  898. char *origin_path, *cow_path;
  899. unsigned args_used, num_flush_bios = 1;
  900. fmode_t origin_mode = FMODE_READ;
  901. if (argc != 4) {
  902. ti->error = "requires exactly 4 arguments";
  903. r = -EINVAL;
  904. goto bad;
  905. }
  906. if (dm_target_is_snapshot_merge(ti)) {
  907. num_flush_bios = 2;
  908. origin_mode = FMODE_WRITE;
  909. }
  910. s = kmalloc(sizeof(*s), GFP_KERNEL);
  911. if (!s) {
  912. ti->error = "Cannot allocate private snapshot structure";
  913. r = -ENOMEM;
  914. goto bad;
  915. }
  916. origin_path = argv[0];
  917. argv++;
  918. argc--;
  919. r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
  920. if (r) {
  921. ti->error = "Cannot get origin device";
  922. goto bad_origin;
  923. }
  924. cow_path = argv[0];
  925. argv++;
  926. argc--;
  927. r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
  928. if (r) {
  929. ti->error = "Cannot get COW device";
  930. goto bad_cow;
  931. }
  932. r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
  933. if (r) {
  934. ti->error = "Couldn't create exception store";
  935. r = -EINVAL;
  936. goto bad_store;
  937. }
  938. argv += args_used;
  939. argc -= args_used;
  940. s->ti = ti;
  941. s->valid = 1;
  942. s->snapshot_overflowed = 0;
  943. s->active = 0;
  944. atomic_set(&s->pending_exceptions_count, 0);
  945. s->exception_start_sequence = 0;
  946. s->exception_complete_sequence = 0;
  947. INIT_LIST_HEAD(&s->out_of_order_list);
  948. init_rwsem(&s->lock);
  949. INIT_LIST_HEAD(&s->list);
  950. spin_lock_init(&s->pe_lock);
  951. s->state_bits = 0;
  952. s->merge_failed = 0;
  953. s->first_merging_chunk = 0;
  954. s->num_merging_chunks = 0;
  955. bio_list_init(&s->bios_queued_during_merge);
  956. /* Allocate hash table for COW data */
  957. if (init_hash_tables(s)) {
  958. ti->error = "Unable to allocate hash table space";
  959. r = -ENOMEM;
  960. goto bad_hash_tables;
  961. }
  962. s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
  963. if (IS_ERR(s->kcopyd_client)) {
  964. r = PTR_ERR(s->kcopyd_client);
  965. ti->error = "Could not create kcopyd client";
  966. goto bad_kcopyd;
  967. }
  968. s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
  969. if (!s->pending_pool) {
  970. ti->error = "Could not allocate mempool for pending exceptions";
  971. r = -ENOMEM;
  972. goto bad_pending_pool;
  973. }
  974. for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
  975. INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
  976. spin_lock_init(&s->tracked_chunk_lock);
  977. ti->private = s;
  978. ti->num_flush_bios = num_flush_bios;
  979. ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
  980. /* Add snapshot to the list of snapshots for this origin */
  981. /* Exceptions aren't triggered till snapshot_resume() is called */
  982. r = register_snapshot(s);
  983. if (r == -ENOMEM) {
  984. ti->error = "Snapshot origin struct allocation failed";
  985. goto bad_load_and_register;
  986. } else if (r < 0) {
  987. /* invalid handover, register_snapshot has set ti->error */
  988. goto bad_load_and_register;
  989. }
  990. /*
  991. * Metadata must only be loaded into one table at once, so skip this
  992. * if metadata will be handed over during resume.
  993. * Chunk size will be set during the handover - set it to zero to
  994. * ensure it's ignored.
  995. */
  996. if (r > 0) {
  997. s->store->chunk_size = 0;
  998. return 0;
  999. }
  1000. r = s->store->type->read_metadata(s->store, dm_add_exception,
  1001. (void *)s);
  1002. if (r < 0) {
  1003. ti->error = "Failed to read snapshot metadata";
  1004. goto bad_read_metadata;
  1005. } else if (r > 0) {
  1006. s->valid = 0;
  1007. DMWARN("Snapshot is marked invalid.");
  1008. }
  1009. if (!s->store->chunk_size) {
  1010. ti->error = "Chunk size not set";
  1011. goto bad_read_metadata;
  1012. }
  1013. r = dm_set_target_max_io_len(ti, s->store->chunk_size);
  1014. if (r)
  1015. goto bad_read_metadata;
  1016. return 0;
  1017. bad_read_metadata:
  1018. unregister_snapshot(s);
  1019. bad_load_and_register:
  1020. mempool_destroy(s->pending_pool);
  1021. bad_pending_pool:
  1022. dm_kcopyd_client_destroy(s->kcopyd_client);
  1023. bad_kcopyd:
  1024. dm_exception_table_exit(&s->pending, pending_cache);
  1025. dm_exception_table_exit(&s->complete, exception_cache);
  1026. bad_hash_tables:
  1027. dm_exception_store_destroy(s->store);
  1028. bad_store:
  1029. dm_put_device(ti, s->cow);
  1030. bad_cow:
  1031. dm_put_device(ti, s->origin);
  1032. bad_origin:
  1033. kfree(s);
  1034. bad:
  1035. return r;
  1036. }
  1037. static void __free_exceptions(struct dm_snapshot *s)
  1038. {
  1039. dm_kcopyd_client_destroy(s->kcopyd_client);
  1040. s->kcopyd_client = NULL;
  1041. dm_exception_table_exit(&s->pending, pending_cache);
  1042. dm_exception_table_exit(&s->complete, exception_cache);
  1043. }
  1044. static void __handover_exceptions(struct dm_snapshot *snap_src,
  1045. struct dm_snapshot *snap_dest)
  1046. {
  1047. union {
  1048. struct dm_exception_table table_swap;
  1049. struct dm_exception_store *store_swap;
  1050. } u;
  1051. /*
  1052. * Swap all snapshot context information between the two instances.
  1053. */
  1054. u.table_swap = snap_dest->complete;
  1055. snap_dest->complete = snap_src->complete;
  1056. snap_src->complete = u.table_swap;
  1057. u.store_swap = snap_dest->store;
  1058. snap_dest->store = snap_src->store;
  1059. snap_src->store = u.store_swap;
  1060. snap_dest->store->snap = snap_dest;
  1061. snap_src->store->snap = snap_src;
  1062. snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
  1063. snap_dest->valid = snap_src->valid;
  1064. snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;
  1065. /*
  1066. * Set source invalid to ensure it receives no further I/O.
  1067. */
  1068. snap_src->valid = 0;
  1069. }
  1070. static void snapshot_dtr(struct dm_target *ti)
  1071. {
  1072. #ifdef CONFIG_DM_DEBUG
  1073. int i;
  1074. #endif
  1075. struct dm_snapshot *s = ti->private;
  1076. struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
  1077. down_read(&_origins_lock);
  1078. /* Check whether exception handover must be cancelled */
  1079. (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
  1080. if (snap_src && snap_dest && (s == snap_src)) {
  1081. down_write(&snap_dest->lock);
  1082. snap_dest->valid = 0;
  1083. up_write(&snap_dest->lock);
  1084. DMERR("Cancelling snapshot handover.");
  1085. }
  1086. up_read(&_origins_lock);
  1087. if (dm_target_is_snapshot_merge(ti))
  1088. stop_merge(s);
  1089. /* Prevent further origin writes from using this snapshot. */
  1090. /* After this returns there can be no new kcopyd jobs. */
  1091. unregister_snapshot(s);
  1092. while (atomic_read(&s->pending_exceptions_count))
  1093. msleep(1);
  1094. /*
  1095. * Ensure instructions in mempool_destroy aren't reordered
  1096. * before atomic_read.
  1097. */
  1098. smp_mb();
  1099. #ifdef CONFIG_DM_DEBUG
  1100. for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
  1101. BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
  1102. #endif
  1103. __free_exceptions(s);
  1104. mempool_destroy(s->pending_pool);
  1105. dm_exception_store_destroy(s->store);
  1106. dm_put_device(ti, s->cow);
  1107. dm_put_device(ti, s->origin);
  1108. kfree(s);
  1109. }
  1110. /*
  1111. * Flush a list of buffers.
  1112. */
  1113. static void flush_bios(struct bio *bio)
  1114. {
  1115. struct bio *n;
  1116. while (bio) {
  1117. n = bio->bi_next;
  1118. bio->bi_next = NULL;
  1119. generic_make_request(bio);
  1120. bio = n;
  1121. }
  1122. }
  1123. static int do_origin(struct dm_dev *origin, struct bio *bio);
  1124. /*
  1125. * Flush a list of buffers.
  1126. */
  1127. static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
  1128. {
  1129. struct bio *n;
  1130. int r;
  1131. while (bio) {
  1132. n = bio->bi_next;
  1133. bio->bi_next = NULL;
  1134. r = do_origin(s->origin, bio);
  1135. if (r == DM_MAPIO_REMAPPED)
  1136. generic_make_request(bio);
  1137. bio = n;
  1138. }
  1139. }
  1140. /*
  1141. * Error a list of buffers.
  1142. */
  1143. static void error_bios(struct bio *bio)
  1144. {
  1145. struct bio *n;
  1146. while (bio) {
  1147. n = bio->bi_next;
  1148. bio->bi_next = NULL;
  1149. bio_io_error(bio);
  1150. bio = n;
  1151. }
  1152. }
  1153. static void __invalidate_snapshot(struct dm_snapshot *s, int err)
  1154. {
  1155. if (!s->valid)
  1156. return;
  1157. if (err == -EIO)
  1158. DMERR("Invalidating snapshot: Error reading/writing.");
  1159. else if (err == -ENOMEM)
  1160. DMERR("Invalidating snapshot: Unable to allocate exception.");
  1161. if (s->store->type->drop_snapshot)
  1162. s->store->type->drop_snapshot(s->store);
  1163. s->valid = 0;
  1164. dm_table_event(s->ti->table);
  1165. }
  1166. static void pending_complete(struct dm_snap_pending_exception *pe, int success)
  1167. {
  1168. struct dm_exception *e;
  1169. struct dm_snapshot *s = pe->snap;
  1170. struct bio *origin_bios = NULL;
  1171. struct bio *snapshot_bios = NULL;
  1172. struct bio *full_bio = NULL;
  1173. int error = 0;
  1174. if (!success) {
  1175. /* Read/write error - snapshot is unusable */
  1176. down_write(&s->lock);
  1177. __invalidate_snapshot(s, -EIO);
  1178. error = 1;
  1179. goto out;
  1180. }
  1181. e = alloc_completed_exception(GFP_NOIO);
  1182. if (!e) {
  1183. down_write(&s->lock);
  1184. __invalidate_snapshot(s, -ENOMEM);
  1185. error = 1;
  1186. goto out;
  1187. }
  1188. *e = pe->e;
  1189. down_write(&s->lock);
  1190. if (!s->valid) {
  1191. free_completed_exception(e);
  1192. error = 1;
  1193. goto out;
  1194. }
  1195. /* Check for conflicting reads */
  1196. __check_for_conflicting_io(s, pe->e.old_chunk);
  1197. /*
  1198. * Add a proper exception, and remove the
  1199. * in-flight exception from the list.
  1200. */
  1201. dm_insert_exception(&s->complete, e);
  1202. out:
  1203. dm_remove_exception(&pe->e);
  1204. snapshot_bios = bio_list_get(&pe->snapshot_bios);
  1205. origin_bios = bio_list_get(&pe->origin_bios);
  1206. full_bio = pe->full_bio;
  1207. if (full_bio) {
  1208. full_bio->bi_end_io = pe->full_bio_end_io;
  1209. full_bio->bi_private = pe->full_bio_private;
  1210. }
  1211. increment_pending_exceptions_done_count();
  1212. up_write(&s->lock);
  1213. /* Submit any pending write bios */
  1214. if (error) {
  1215. if (full_bio)
  1216. bio_io_error(full_bio);
  1217. error_bios(snapshot_bios);
  1218. } else {
  1219. if (full_bio)
  1220. bio_endio(full_bio);
  1221. flush_bios(snapshot_bios);
  1222. }
  1223. retry_origin_bios(s, origin_bios);
  1224. free_pending_exception(pe);
  1225. }
  1226. static void commit_callback(void *context, int success)
  1227. {
  1228. struct dm_snap_pending_exception *pe = context;
  1229. pending_complete(pe, success);
  1230. }
  1231. static void complete_exception(struct dm_snap_pending_exception *pe)
  1232. {
  1233. struct dm_snapshot *s = pe->snap;
  1234. if (unlikely(pe->copy_error))
  1235. pending_complete(pe, 0);
  1236. else
  1237. /* Update the metadata if we are persistent */
  1238. s->store->type->commit_exception(s->store, &pe->e,
  1239. commit_callback, pe);
  1240. }
  1241. /*
  1242. * Called when the copy I/O has finished. kcopyd actually runs
  1243. * this code so don't block.
  1244. */
  1245. static void copy_callback(int read_err, unsigned long write_err, void *context)
  1246. {
  1247. struct dm_snap_pending_exception *pe = context;
  1248. struct dm_snapshot *s = pe->snap;
  1249. pe->copy_error = read_err || write_err;
  1250. if (pe->exception_sequence == s->exception_complete_sequence) {
  1251. s->exception_complete_sequence++;
  1252. complete_exception(pe);
  1253. while (!list_empty(&s->out_of_order_list)) {
  1254. pe = list_entry(s->out_of_order_list.next,
  1255. struct dm_snap_pending_exception, out_of_order_entry);
  1256. if (pe->exception_sequence != s->exception_complete_sequence)
  1257. break;
  1258. s->exception_complete_sequence++;
  1259. list_del(&pe->out_of_order_entry);
  1260. complete_exception(pe);
  1261. }
  1262. } else {
  1263. struct list_head *lh;
  1264. struct dm_snap_pending_exception *pe2;
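/*
 * Completed out of order: walk backwards to find the insertion point that
 * keeps out_of_order_list sorted by exception_sequence.
 */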
  1265. list_for_each_prev(lh, &s->out_of_order_list) {
  1266. pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
  1267. if (pe2->exception_sequence < pe->exception_sequence)
  1268. break;
  1269. }
  1270. list_add(&pe->out_of_order_entry, lh);
  1271. }
  1272. }
  1273. /*
  1274. * Dispatches the copy operation to kcopyd.
  1275. */
  1276. static void start_copy(struct dm_snap_pending_exception *pe)
  1277. {
  1278. struct dm_snapshot *s = pe->snap;
  1279. struct dm_io_region src, dest;
  1280. struct block_device *bdev = s->origin->bdev;
  1281. sector_t dev_size;
  1282. dev_size = get_dev_size(bdev);
  1283. src.bdev = bdev;
  1284. src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
  1285. src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
  1286. dest.bdev = s->cow->bdev;
  1287. dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
  1288. dest.count = src.count;
  1289. /* Hand over to kcopyd */
  1290. dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
  1291. }
  1292. static void full_bio_end_io(struct bio *bio)
  1293. {
  1294. void *callback_data = bio->bi_private;
  1295. dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
  1296. }
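/*
 * The bio covers a whole chunk and has already been remapped to the COW
 * device, so submit it directly instead of copying via kcopyd. The saved
 * bi_end_io/bi_private are restored in pending_complete().
 */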
  1297. static void start_full_bio(struct dm_snap_pending_exception *pe,
  1298. struct bio *bio)
  1299. {
  1300. struct dm_snapshot *s = pe->snap;
  1301. void *callback_data;
  1302. pe->full_bio = bio;
  1303. pe->full_bio_end_io = bio->bi_end_io;
  1304. pe->full_bio_private = bio->bi_private;
  1305. callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
  1306. copy_callback, pe);
  1307. bio->bi_end_io = full_bio_end_io;
  1308. bio->bi_private = callback_data;
  1309. generic_make_request(bio);
  1310. }
  1311. static struct dm_snap_pending_exception *
  1312. __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
  1313. {
  1314. struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
  1315. if (!e)
  1316. return NULL;
  1317. return container_of(e, struct dm_snap_pending_exception, e);
  1318. }
  1319. /*
  1320. * Looks to see if this snapshot already has a pending exception
  1321. * for this chunk, otherwise it allocates a new one and inserts
  1322. * it into the pending table.
  1323. *
  1324. * NOTE: a write lock must be held on snap->lock before calling
  1325. * this.
  1326. */
  1327. static struct dm_snap_pending_exception *
  1328. __find_pending_exception(struct dm_snapshot *s,
  1329. struct dm_snap_pending_exception *pe, chunk_t chunk)
  1330. {
  1331. struct dm_snap_pending_exception *pe2;
  1332. pe2 = __lookup_pending_exception(s, chunk);
  1333. if (pe2) {
  1334. free_pending_exception(pe);
  1335. return pe2;
  1336. }
  1337. pe->e.old_chunk = chunk;
  1338. bio_list_init(&pe->origin_bios);
  1339. bio_list_init(&pe->snapshot_bios);
  1340. pe->started = 0;
  1341. pe->full_bio = NULL;
  1342. if (s->store->type->prepare_exception(s->store, &pe->e)) {
  1343. free_pending_exception(pe);
  1344. return NULL;
  1345. }
  1346. pe->exception_sequence = s->exception_start_sequence++;
  1347. dm_insert_exception(&s->pending, &pe->e);
  1348. return pe;
  1349. }
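/*
 * Redirect the bio to the COW device: pick the new chunk corresponding to
 * this old chunk (allowing for runs of consecutive chunks) and preserve the
 * offset within the chunk.
 */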
  1350. static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
  1351. struct bio *bio, chunk_t chunk)
  1352. {
  1353. bio->bi_bdev = s->cow->bdev;
  1354. bio->bi_iter.bi_sector =
  1355. chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
  1356. (chunk - e->old_chunk)) +
  1357. (bio->bi_iter.bi_sector & s->store->chunk_mask);
  1358. }
static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	init_tracked_chunk(bio);

	if (bio->bi_rw & REQ_FLUSH) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid || s->snapshot_overflowed) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				s->snapshot_overflowed = 1;
				DMERR("Snapshot overflowed: Unable to allocate exception.");
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started &&
		    bio->bi_iter.bi_size ==
		    (s->store->chunk_size << SECTOR_SHIFT)) {
			pe->started = 1;
			up_write(&s->lock);
			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		track_chunk(s, bio, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}

/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	if (bio->bi_rw & REQ_FLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio->bi_bdev = s->origin->bdev;
		else
			bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_rw(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio->bi_bdev = s->origin->bdev;
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_rw(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio->bi_bdev = s->origin->bdev;

	if (bio_rw(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}
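
/*
 * Drop the chunk tracking installed by snapshot_map() or
 * snapshot_merge_map() once the remapped bio has completed.
 */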
static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return 0;
}
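
/* Stop any merge in progress before the snapshot-merge target is suspended. */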
static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}
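
/*
 * Refuse to resume while an exception handover is outstanding: the
 * handover source may not be resumed until the handover completes, and
 * the destination may not be resumed until the source is suspended.
 */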
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}
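
/*
 * Complete any pending exception handover from the snapshot that
 * previously owned this COW device.  The origin (or the merging
 * snapshot standing in for it) is briefly suspended, and a running
 * merge is paused, so no new exceptions are created while the
 * exception tables are swapped.
 */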
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
	struct dm_origin *o;
	struct mapped_device *origin_md = NULL;
	bool must_restart_merging = false;

	down_read(&_origins_lock);

	o = __lookup_dm_origin(s->origin->bdev);
	if (o)
		origin_md = dm_table_get_md(o->ti->table);
	if (!origin_md) {
		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
		if (snap_merging)
			origin_md = dm_table_get_md(snap_merging->ti->table);
	}
	if (origin_md == dm_table_get_md(ti->table))
		origin_md = NULL;
	if (origin_md) {
		if (dm_hold(origin_md))
			origin_md = NULL;
	}

	up_read(&_origins_lock);

	if (origin_md) {
		dm_internal_suspend_fast(origin_md);
		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
			must_restart_merging = true;
			stop_merge(snap_merging);
		}
	}

	down_read(&_origins_lock);

	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}

	up_read(&_origins_lock);

	if (origin_md) {
		if (must_restart_merging)
			start_merge(snap_merging);
		dm_internal_resume_fast(origin_md);
		dm_put(origin_md);
	}

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}
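
/* Return the smallest chunk size of all snapshots attached to this origin. */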
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}
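
/*
 * Report target status.  STATUSTYPE_INFO emits
 * "<sectors_allocated>/<total_sectors> <metadata_sectors>" (or
 * "Invalid", "Merge failed" or "Overflow" as appropriate);
 * STATUSTYPE_TABLE reproduces the table line: origin, COW device and
 * the exception store parameters.
 */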
static void snapshot_status(struct dm_target *ti, status_type_t type,
			    unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else if (snap->snapshot_overflowed)
			DMEMIT("Overflow");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			}
			else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}
}

static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		up_write(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_origin *o;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
	if (!o) {
		ti->error = "Cannot allocate private origin structure";
		r = -ENOMEM;
		goto bad_alloc;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
	if (r) {
		ti->error = "Cannot get target device";
		goto bad_open;
	}

	o->ti = ti;
	ti->private = o;
	ti->num_flush_bios = 1;

	return 0;

bad_open:
	kfree(o);
bad_alloc:
	return r;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	dm_put_device(ti, o->dev);
	kfree(o);
}
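
/*
 * Map I/O to the origin device.  Writes are split so they never cross a
 * split_boundary (the minimum chunk size of all attached snapshots) and
 * are passed to do_origin() so the snapshots can copy out the old data
 * before the write proceeds.
 */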
static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_origin *o = ti->private;
	unsigned available_sectors;

	bio->bi_bdev = o->dev->bdev;

	if (unlikely(bio->bi_rw & REQ_FLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_rw(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

	available_sectors = o->split_boundary -
		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	if (bio_sectors(bio) > available_sectors)
		dm_accept_partial_bio(bio, available_sectors);

	/* Only tell snapshots if this is a write */
	return do_origin(o->dev, bio);
}

/*
 * Set the origin's split_boundary to the minimum of all the snapshots'
 * chunk sizes and register the origin.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);

	down_write(&_origins_lock);
	__insert_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_postsuspend(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	down_write(&_origins_lock);
	__remove_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_origin *o = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", o->dev->name);
		break;
	}
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_origin *o = ti->private;

	return fn(ti, o->dev, 0, ti->len, data);
}
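
/*
 * The three target types registered by this module: "snapshot-origin",
 * "snapshot" and the merge target named by dm_snapshot_merge_target_name.
 */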
static struct target_type origin_target = {
	.name = "snapshot-origin",
	.version = {1, 9, 0},
	.module = THIS_MODULE,
	.ctr = origin_ctr,
	.dtr = origin_dtr,
	.map = origin_map,
	.resume = origin_resume,
	.postsuspend = origin_postsuspend,
	.status = origin_status,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name = "snapshot",
	.version = {1, 14, 0},
	.module = THIS_MODULE,
	.ctr = snapshot_ctr,
	.dtr = snapshot_dtr,
	.map = snapshot_map,
	.end_io = snapshot_end_io,
	.preresume = snapshot_preresume,
	.resume = snapshot_resume,
	.status = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
	.name = dm_snapshot_merge_target_name,
	.version = {1, 3, 0},
	.module = THIS_MODULE,
	.ctr = snapshot_ctr,
	.dtr = snapshot_dtr,
	.map = snapshot_merge_map,
	.end_io = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume = snapshot_preresume,
	.resume = snapshot_merge_resume,
	.status = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};
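
/*
 * Module initialisation: set up the exception stores, register the
 * three targets, build the origin hash and create the slab caches for
 * exceptions and pending exceptions.  Everything is unwound in reverse
 * order on failure.
 */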
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	return 0;

bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&merge_target);
bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");