dm.c

  1. /*
  2. * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
  3. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  4. *
  5. * This file is released under the GPL.
  6. */
  7. #include "dm-core.h"
  8. #include "dm-rq.h"
  9. #include "dm-uevent.h"
  10. #include <linux/init.h>
  11. #include <linux/module.h>
  12. #include <linux/mutex.h>
  13. #include <linux/blkpg.h>
  14. #include <linux/bio.h>
  15. #include <linux/mempool.h>
  16. #include <linux/slab.h>
  17. #include <linux/idr.h>
  18. #include <linux/hdreg.h>
  19. #include <linux/delay.h>
  20. #include <linux/wait.h>
  21. #include <linux/pr.h>
  22. #define DM_MSG_PREFIX "core"
  23. #ifdef CONFIG_PRINTK
  24. /*
  25. * ratelimit state to be used in DMXXX_LIMIT().
  26. */
  27. DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
  28. DEFAULT_RATELIMIT_INTERVAL,
  29. DEFAULT_RATELIMIT_BURST);
  30. EXPORT_SYMBOL(dm_ratelimit_state);
  31. #endif
  32. /*
  33. * Cookies are numeric values sent with CHANGE and REMOVE
  34. * uevents while resuming, removing or renaming the device.
  35. */
  36. #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
  37. #define DM_COOKIE_LENGTH 24
  38. static const char *_name = DM_NAME;
  39. static unsigned int major = 0;
  40. static unsigned int _major = 0;
  41. static DEFINE_IDR(_minor_idr);
  42. static DEFINE_SPINLOCK(_minor_lock);
  43. static void do_deferred_remove(struct work_struct *w);
  44. static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
  45. static struct workqueue_struct *deferred_remove_workqueue;
  46. /*
  47. * One of these is allocated per bio.
  48. */
  49. struct dm_io {
  50. struct mapped_device *md;
  51. int error;
  52. atomic_t io_count;
  53. struct bio *bio;
  54. unsigned long start_time;
  55. spinlock_t endio_lock;
  56. struct dm_stats_aux stats_aux;
  57. };
  58. #define MINOR_ALLOCED ((void *)-1)
  59. /*
  60. * Bits for the md->flags field.
  61. */
  62. #define DMF_BLOCK_IO_FOR_SUSPEND 0
  63. #define DMF_SUSPENDED 1
  64. #define DMF_FROZEN 2
  65. #define DMF_FREEING 3
  66. #define DMF_DELETING 4
  67. #define DMF_NOFLUSH_SUSPENDING 5
  68. #define DMF_DEFERRED_REMOVE 6
  69. #define DMF_SUSPENDED_INTERNALLY 7
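/*
 * Illustrative userspace sketch (not taken from dm.c): the DMF_* values
 * above are bit numbers within md->flags, which the driver manipulates with
 * set_bit()/test_bit()/clear_bit().  Plain shifts and masks show the
 * equivalent arithmetic.
 */
#include <stdio.h>

int main(void)
{
	unsigned long flags = 0;
	const int DMF_SUSPENDED_BIT = 1, DMF_FROZEN_BIT = 2;	/* same numbering as above */

	flags |= 1UL << DMF_SUSPENDED_BIT;		/* set_bit(DMF_SUSPENDED, &md->flags) */
	printf("suspended=%d frozen=%d\n",
	       !!(flags & (1UL << DMF_SUSPENDED_BIT)),	/* test_bit() */
	       !!(flags & (1UL << DMF_FROZEN_BIT)));
	return 0;
}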
  70. #define DM_NUMA_NODE NUMA_NO_NODE
  71. static int dm_numa_node = DM_NUMA_NODE;
  72. /*
  73. * For mempools pre-allocation at the table loading time.
  74. */
  75. struct dm_md_mempools {
  76. mempool_t *io_pool;
  77. mempool_t *rq_pool;
  78. struct bio_set *bs;
  79. };
  80. struct table_device {
  81. struct list_head list;
  82. atomic_t count;
  83. struct dm_dev dm_dev;
  84. };
  85. static struct kmem_cache *_io_cache;
  86. static struct kmem_cache *_rq_tio_cache;
  87. static struct kmem_cache *_rq_cache;
  88. /*
  89. * Bio-based DM's mempools' reserved IOs set by the user.
  90. */
  91. #define RESERVED_BIO_BASED_IOS 16
  92. static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
  93. static int __dm_get_module_param_int(int *module_param, int min, int max)
  94. {
  95. int param = ACCESS_ONCE(*module_param);
  96. int modified_param = 0;
  97. bool modified = true;
  98. if (param < min)
  99. modified_param = min;
  100. else if (param > max)
  101. modified_param = max;
  102. else
  103. modified = false;
  104. if (modified) {
  105. (void)cmpxchg(module_param, param, modified_param);
  106. param = modified_param;
  107. }
  108. return param;
  109. }
  110. unsigned __dm_get_module_param(unsigned *module_param,
  111. unsigned def, unsigned max)
  112. {
  113. unsigned param = ACCESS_ONCE(*module_param);
  114. unsigned modified_param = 0;
  115. if (!param)
  116. modified_param = def;
  117. else if (param > max)
  118. modified_param = max;
  119. if (modified_param) {
  120. (void)cmpxchg(module_param, param, modified_param);
  121. param = modified_param;
  122. }
  123. return param;
  124. }
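/*
 * Illustrative userspace sketch of the clamping performed by
 * __dm_get_module_param() above: an unset (zero) parameter falls back to
 * the default and anything above the maximum is capped.  The values below
 * are invented; the kernel helper additionally writes the clamped value
 * back into the module parameter with cmpxchg().
 */
#include <stdio.h>

static unsigned clamp_module_param(unsigned param, unsigned def, unsigned max)
{
	if (!param)
		return def;	/* unset: use the built-in default */
	if (param > max)
		return max;	/* too large: cap at the maximum */
	return param;
}

int main(void)
{
	/* e.g. reserved_bio_based_ios with a default of 16 and an assumed max of 1024 */
	printf("%u %u %u\n",
	       clamp_module_param(0, 16, 1024),		/* -> 16 */
	       clamp_module_param(64, 16, 1024),	/* -> 64 */
	       clamp_module_param(5000, 16, 1024));	/* -> 1024 */
	return 0;
}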
  125. unsigned dm_get_reserved_bio_based_ios(void)
  126. {
  127. return __dm_get_module_param(&reserved_bio_based_ios,
  128. RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
  129. }
  130. EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
  131. static unsigned dm_get_numa_node(void)
  132. {
  133. return __dm_get_module_param_int(&dm_numa_node,
  134. DM_NUMA_NODE, num_online_nodes() - 1);
  135. }
  136. static int __init local_init(void)
  137. {
  138. int r = -ENOMEM;
  139. /* allocate a slab for the dm_ios */
  140. _io_cache = KMEM_CACHE(dm_io, 0);
  141. if (!_io_cache)
  142. return r;
  143. _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
  144. if (!_rq_tio_cache)
  145. goto out_free_io_cache;
  146. _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
  147. __alignof__(struct request), 0, NULL);
  148. if (!_rq_cache)
  149. goto out_free_rq_tio_cache;
  150. r = dm_uevent_init();
  151. if (r)
  152. goto out_free_rq_cache;
  153. deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
  154. if (!deferred_remove_workqueue) {
  155. r = -ENOMEM;
  156. goto out_uevent_exit;
  157. }
  158. _major = major;
  159. r = register_blkdev(_major, _name);
  160. if (r < 0)
  161. goto out_free_workqueue;
  162. if (!_major)
  163. _major = r;
  164. return 0;
  165. out_free_workqueue:
  166. destroy_workqueue(deferred_remove_workqueue);
  167. out_uevent_exit:
  168. dm_uevent_exit();
  169. out_free_rq_cache:
  170. kmem_cache_destroy(_rq_cache);
  171. out_free_rq_tio_cache:
  172. kmem_cache_destroy(_rq_tio_cache);
  173. out_free_io_cache:
  174. kmem_cache_destroy(_io_cache);
  175. return r;
  176. }
  177. static void local_exit(void)
  178. {
  179. flush_scheduled_work();
  180. destroy_workqueue(deferred_remove_workqueue);
  181. kmem_cache_destroy(_rq_cache);
  182. kmem_cache_destroy(_rq_tio_cache);
  183. kmem_cache_destroy(_io_cache);
  184. unregister_blkdev(_major, _name);
  185. dm_uevent_exit();
  186. _major = 0;
  187. DMINFO("cleaned up");
  188. }
  189. static int (*_inits[])(void) __initdata = {
  190. local_init,
  191. dm_target_init,
  192. dm_linear_init,
  193. dm_stripe_init,
  194. dm_io_init,
  195. dm_kcopyd_init,
  196. dm_interface_init,
  197. dm_statistics_init,
  198. };
  199. static void (*_exits[])(void) = {
  200. local_exit,
  201. dm_target_exit,
  202. dm_linear_exit,
  203. dm_stripe_exit,
  204. dm_io_exit,
  205. dm_kcopyd_exit,
  206. dm_interface_exit,
  207. dm_statistics_exit,
  208. };
  209. static int __init dm_init(void)
  210. {
  211. const int count = ARRAY_SIZE(_inits);
  212. int r, i;
  213. for (i = 0; i < count; i++) {
  214. r = _inits[i]();
  215. if (r)
  216. goto bad;
  217. }
  218. return 0;
  219. bad:
  220. while (i--)
  221. _exits[i]();
  222. return r;
  223. }
  224. static void __exit dm_exit(void)
  225. {
  226. int i = ARRAY_SIZE(_exits);
  227. while (i--)
  228. _exits[i]();
  229. /*
  230. * Should be empty by this point.
  231. */
  232. idr_destroy(&_minor_idr);
  233. }
  234. /*
  235. * Block device functions
  236. */
  237. int dm_deleting_md(struct mapped_device *md)
  238. {
  239. return test_bit(DMF_DELETING, &md->flags);
  240. }
  241. static int dm_blk_open(struct block_device *bdev, fmode_t mode)
  242. {
  243. struct mapped_device *md;
  244. spin_lock(&_minor_lock);
  245. md = bdev->bd_disk->private_data;
  246. if (!md)
  247. goto out;
  248. if (test_bit(DMF_FREEING, &md->flags) ||
  249. dm_deleting_md(md)) {
  250. md = NULL;
  251. goto out;
  252. }
  253. dm_get(md);
  254. atomic_inc(&md->open_count);
  255. out:
  256. spin_unlock(&_minor_lock);
  257. return md ? 0 : -ENXIO;
  258. }
  259. static void dm_blk_close(struct gendisk *disk, fmode_t mode)
  260. {
  261. struct mapped_device *md;
  262. spin_lock(&_minor_lock);
  263. md = disk->private_data;
  264. if (WARN_ON(!md))
  265. goto out;
  266. if (atomic_dec_and_test(&md->open_count) &&
  267. (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
  268. queue_work(deferred_remove_workqueue, &deferred_remove_work);
  269. dm_put(md);
  270. out:
  271. spin_unlock(&_minor_lock);
  272. }
  273. int dm_open_count(struct mapped_device *md)
  274. {
  275. return atomic_read(&md->open_count);
  276. }
  277. /*
  278. * Guarantees nothing is using the device before it's deleted.
  279. */
  280. int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
  281. {
  282. int r = 0;
  283. spin_lock(&_minor_lock);
  284. if (dm_open_count(md)) {
  285. r = -EBUSY;
  286. if (mark_deferred)
  287. set_bit(DMF_DEFERRED_REMOVE, &md->flags);
  288. } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
  289. r = -EEXIST;
  290. else
  291. set_bit(DMF_DELETING, &md->flags);
  292. spin_unlock(&_minor_lock);
  293. return r;
  294. }
  295. int dm_cancel_deferred_remove(struct mapped_device *md)
  296. {
  297. int r = 0;
  298. spin_lock(&_minor_lock);
  299. if (test_bit(DMF_DELETING, &md->flags))
  300. r = -EBUSY;
  301. else
  302. clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
  303. spin_unlock(&_minor_lock);
  304. return r;
  305. }
  306. static void do_deferred_remove(struct work_struct *w)
  307. {
  308. dm_deferred_remove();
  309. }
  310. sector_t dm_get_size(struct mapped_device *md)
  311. {
  312. return get_capacity(md->disk);
  313. }
  314. struct request_queue *dm_get_md_queue(struct mapped_device *md)
  315. {
  316. return md->queue;
  317. }
  318. struct dm_stats *dm_get_stats(struct mapped_device *md)
  319. {
  320. return &md->stats;
  321. }
  322. static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  323. {
  324. struct mapped_device *md = bdev->bd_disk->private_data;
  325. return dm_get_geometry(md, geo);
  326. }
  327. static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
  328. struct block_device **bdev,
  329. fmode_t *mode)
  330. {
  331. struct dm_target *tgt;
  332. struct dm_table *map;
  333. int srcu_idx, r;
  334. retry:
  335. r = -ENOTTY;
  336. map = dm_get_live_table(md, &srcu_idx);
  337. if (!map || !dm_table_get_size(map))
  338. goto out;
  339. /* We only support devices that have a single target */
  340. if (dm_table_get_num_targets(map) != 1)
  341. goto out;
  342. tgt = dm_table_get_target(map, 0);
  343. if (!tgt->type->prepare_ioctl)
  344. goto out;
  345. if (dm_suspended_md(md)) {
  346. r = -EAGAIN;
  347. goto out;
  348. }
  349. r = tgt->type->prepare_ioctl(tgt, bdev, mode);
  350. if (r < 0)
  351. goto out;
  352. bdgrab(*bdev);
  353. dm_put_live_table(md, srcu_idx);
  354. return r;
  355. out:
  356. dm_put_live_table(md, srcu_idx);
  357. if (r == -ENOTCONN && !fatal_signal_pending(current)) {
  358. msleep(10);
  359. goto retry;
  360. }
  361. return r;
  362. }
  363. static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
  364. unsigned int cmd, unsigned long arg)
  365. {
  366. struct mapped_device *md = bdev->bd_disk->private_data;
  367. int r;
  368. r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
  369. if (r < 0)
  370. return r;
  371. if (r > 0) {
  372. /*
  373. * Target determined this ioctl is being issued against
  374. * a logical partition of the parent bdev; so extra
  375. * validation is needed.
  376. */
  377. r = scsi_verify_blk_ioctl(NULL, cmd);
  378. if (r)
  379. goto out;
  380. }
  381. r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
  382. out:
  383. bdput(bdev);
  384. return r;
  385. }
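/*
 * Illustrative userspace sketch (device path and values are assumptions):
 * HDIO_GETGEO against a dm device is answered by dm_blk_getgeo() above via
 * the ->getgeo() hook, while most other ioctls pass through dm_blk_ioctl()
 * to the device's single underlying target.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/dm-0", O_RDONLY);	/* hypothetical dm device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, HDIO_GETGEO, &geo) < 0) {
		perror("HDIO_GETGEO");
		close(fd);
		return 1;
	}
	printf("heads=%d sectors=%d cylinders=%d start=%lu\n",
	       geo.heads, geo.sectors, geo.cylinders, geo.start);
	close(fd);
	return 0;
}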
  386. static struct dm_io *alloc_io(struct mapped_device *md)
  387. {
  388. return mempool_alloc(md->io_pool, GFP_NOIO);
  389. }
  390. static void free_io(struct mapped_device *md, struct dm_io *io)
  391. {
  392. mempool_free(io, md->io_pool);
  393. }
  394. static void free_tio(struct dm_target_io *tio)
  395. {
  396. bio_put(&tio->clone);
  397. }
  398. int md_in_flight(struct mapped_device *md)
  399. {
  400. return atomic_read(&md->pending[READ]) +
  401. atomic_read(&md->pending[WRITE]);
  402. }
  403. static void start_io_acct(struct dm_io *io)
  404. {
  405. struct mapped_device *md = io->md;
  406. struct bio *bio = io->bio;
  407. int cpu;
  408. int rw = bio_data_dir(bio);
  409. io->start_time = jiffies;
  410. cpu = part_stat_lock();
  411. part_round_stats(cpu, &dm_disk(md)->part0);
  412. part_stat_unlock();
  413. atomic_set(&dm_disk(md)->part0.in_flight[rw],
  414. atomic_inc_return(&md->pending[rw]));
  415. if (unlikely(dm_stats_used(&md->stats)))
  416. dm_stats_account_io(&md->stats, bio_data_dir(bio),
  417. bio->bi_iter.bi_sector, bio_sectors(bio),
  418. false, 0, &io->stats_aux);
  419. }
  420. static void end_io_acct(struct dm_io *io)
  421. {
  422. struct mapped_device *md = io->md;
  423. struct bio *bio = io->bio;
  424. unsigned long duration = jiffies - io->start_time;
  425. int pending;
  426. int rw = bio_data_dir(bio);
  427. generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
  428. if (unlikely(dm_stats_used(&md->stats)))
  429. dm_stats_account_io(&md->stats, bio_data_dir(bio),
  430. bio->bi_iter.bi_sector, bio_sectors(bio),
  431. true, duration, &io->stats_aux);
  432. /*
  433. * After this is decremented the bio must not be touched if it is
  434. * a flush.
  435. */
  436. pending = atomic_dec_return(&md->pending[rw]);
  437. atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
  438. pending += atomic_read(&md->pending[rw^0x1]);
  439. /* nudge anyone waiting on suspend queue */
  440. if (!pending)
  441. wake_up(&md->wait);
  442. }
  443. /*
  444. * Add the bio to the list of deferred io.
  445. */
  446. static void queue_io(struct mapped_device *md, struct bio *bio)
  447. {
  448. unsigned long flags;
  449. spin_lock_irqsave(&md->deferred_lock, flags);
  450. bio_list_add(&md->deferred, bio);
  451. spin_unlock_irqrestore(&md->deferred_lock, flags);
  452. queue_work(md->wq, &md->work);
  453. }
  454. /*
   455. * Everyone (including functions in this file) should use this
  456. * function to access the md->map field, and make sure they call
  457. * dm_put_live_table() when finished.
  458. */
  459. struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
  460. {
  461. *srcu_idx = srcu_read_lock(&md->io_barrier);
  462. return srcu_dereference(md->map, &md->io_barrier);
  463. }
  464. void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
  465. {
  466. srcu_read_unlock(&md->io_barrier, srcu_idx);
  467. }
  468. void dm_sync_table(struct mapped_device *md)
  469. {
  470. synchronize_srcu(&md->io_barrier);
  471. synchronize_rcu_expedited();
  472. }
  473. /*
  474. * A fast alternative to dm_get_live_table/dm_put_live_table.
  475. * The caller must not block between these two functions.
  476. */
  477. static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
  478. {
  479. rcu_read_lock();
  480. return rcu_dereference(md->map);
  481. }
  482. static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
  483. {
  484. rcu_read_unlock();
  485. }
  486. /*
  487. * Open a table device so we can use it as a map destination.
  488. */
  489. static int open_table_device(struct table_device *td, dev_t dev,
  490. struct mapped_device *md)
  491. {
  492. static char *_claim_ptr = "I belong to device-mapper";
  493. struct block_device *bdev;
  494. int r;
  495. BUG_ON(td->dm_dev.bdev);
  496. bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
  497. if (IS_ERR(bdev))
  498. return PTR_ERR(bdev);
  499. r = bd_link_disk_holder(bdev, dm_disk(md));
  500. if (r) {
  501. blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
  502. return r;
  503. }
  504. td->dm_dev.bdev = bdev;
  505. return 0;
  506. }
  507. /*
  508. * Close a table device that we've been using.
  509. */
  510. static void close_table_device(struct table_device *td, struct mapped_device *md)
  511. {
  512. if (!td->dm_dev.bdev)
  513. return;
  514. bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
  515. blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
  516. td->dm_dev.bdev = NULL;
  517. }
  518. static struct table_device *find_table_device(struct list_head *l, dev_t dev,
  519. fmode_t mode) {
  520. struct table_device *td;
  521. list_for_each_entry(td, l, list)
  522. if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
  523. return td;
  524. return NULL;
  525. }
  526. int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
  527. struct dm_dev **result) {
  528. int r;
  529. struct table_device *td;
  530. mutex_lock(&md->table_devices_lock);
  531. td = find_table_device(&md->table_devices, dev, mode);
  532. if (!td) {
  533. td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
  534. if (!td) {
  535. mutex_unlock(&md->table_devices_lock);
  536. return -ENOMEM;
  537. }
  538. td->dm_dev.mode = mode;
  539. td->dm_dev.bdev = NULL;
  540. if ((r = open_table_device(td, dev, md))) {
  541. mutex_unlock(&md->table_devices_lock);
  542. kfree(td);
  543. return r;
  544. }
  545. format_dev_t(td->dm_dev.name, dev);
  546. atomic_set(&td->count, 0);
  547. list_add(&td->list, &md->table_devices);
  548. }
  549. atomic_inc(&td->count);
  550. mutex_unlock(&md->table_devices_lock);
  551. *result = &td->dm_dev;
  552. return 0;
  553. }
  554. EXPORT_SYMBOL_GPL(dm_get_table_device);
  555. void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
  556. {
  557. struct table_device *td = container_of(d, struct table_device, dm_dev);
  558. mutex_lock(&md->table_devices_lock);
  559. if (atomic_dec_and_test(&td->count)) {
  560. close_table_device(td, md);
  561. list_del(&td->list);
  562. kfree(td);
  563. }
  564. mutex_unlock(&md->table_devices_lock);
  565. }
  566. EXPORT_SYMBOL(dm_put_table_device);
  567. static void free_table_devices(struct list_head *devices)
  568. {
  569. struct list_head *tmp, *next;
  570. list_for_each_safe(tmp, next, devices) {
  571. struct table_device *td = list_entry(tmp, struct table_device, list);
  572. DMWARN("dm_destroy: %s still exists with %d references",
  573. td->dm_dev.name, atomic_read(&td->count));
  574. kfree(td);
  575. }
  576. }
  577. /*
  578. * Get the geometry associated with a dm device
  579. */
  580. int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
  581. {
  582. *geo = md->geometry;
  583. return 0;
  584. }
  585. /*
  586. * Set the geometry of a device.
  587. */
  588. int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
  589. {
  590. sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
  591. if (geo->start > sz) {
  592. DMWARN("Start sector is beyond the geometry limits.");
  593. return -EINVAL;
  594. }
  595. md->geometry = *geo;
  596. return 0;
  597. }
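/*
 * Illustrative sketch of the bounds check in dm_set_geometry() above: the
 * requested start sector must not lie beyond cylinders * heads * sectors.
 * Standalone userspace C with made-up geometry values.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long cylinders = 1024, heads = 255, sectors = 63;
	unsigned long long sz = cylinders * heads * sectors;	/* 16450560 sectors */
	unsigned long long start = 2048;

	printf("capacity=%llu sectors, start=%llu -> %s\n", sz, start,
	       start > sz ? "rejected (-EINVAL)" : "accepted");
	return 0;
}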
  598. /*-----------------------------------------------------------------
  599. * CRUD START:
   600. * A more elegant solution is in the works that uses the queue
  601. * merge fn, unfortunately there are a couple of changes to
  602. * the block layer that I want to make for this. So in the
  603. * interests of getting something for people to use I give
  604. * you this clearly demarcated crap.
  605. *---------------------------------------------------------------*/
  606. static int __noflush_suspending(struct mapped_device *md)
  607. {
  608. return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
  609. }
  610. /*
  611. * Decrements the number of outstanding ios that a bio has been
   612. * cloned into, completing the original io if necessary.
  613. */
  614. static void dec_pending(struct dm_io *io, int error)
  615. {
  616. unsigned long flags;
  617. int io_error;
  618. struct bio *bio;
  619. struct mapped_device *md = io->md;
  620. /* Push-back supersedes any I/O errors */
  621. if (unlikely(error)) {
  622. spin_lock_irqsave(&io->endio_lock, flags);
  623. if (!(io->error > 0 && __noflush_suspending(md)))
  624. io->error = error;
  625. spin_unlock_irqrestore(&io->endio_lock, flags);
  626. }
  627. if (atomic_dec_and_test(&io->io_count)) {
  628. if (io->error == DM_ENDIO_REQUEUE) {
  629. /*
  630. * Target requested pushing back the I/O.
  631. */
  632. spin_lock_irqsave(&md->deferred_lock, flags);
  633. if (__noflush_suspending(md))
  634. bio_list_add_head(&md->deferred, io->bio);
  635. else
  636. /* noflush suspend was interrupted. */
  637. io->error = -EIO;
  638. spin_unlock_irqrestore(&md->deferred_lock, flags);
  639. }
  640. io_error = io->error;
  641. bio = io->bio;
  642. end_io_acct(io);
  643. free_io(md, io);
  644. if (io_error == DM_ENDIO_REQUEUE)
  645. return;
  646. if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
  647. /*
  648. * Preflush done for flush with data, reissue
  649. * without REQ_PREFLUSH.
  650. */
  651. bio->bi_opf &= ~REQ_PREFLUSH;
  652. queue_io(md, bio);
  653. } else {
  654. /* done with normal IO or empty flush */
  655. trace_block_bio_complete(md->queue, bio, io_error);
  656. bio->bi_error = io_error;
  657. bio_endio(bio);
  658. }
  659. }
  660. }
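/*
 * Illustrative userspace sketch of the io_count pattern that dec_pending()
 * implements: one reference per mapped clone plus the extra reference taken
 * in __split_and_process_bio(), with the original bio completed only when
 * the last reference is dropped.  C11 atomics stand in for the kernel's
 * atomic_t; the sequence of calls is invented.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int io_count = 1;	/* the submitter's extra reference */

static void put_io(const char *who)
{
	if (atomic_fetch_sub(&io_count, 1) == 1)
		printf("last reference dropped by %s: complete the original bio\n", who);
}

int main(void)
{
	atomic_fetch_add(&io_count, 1);	/* clone 1 mapped (__map_bio) */
	atomic_fetch_add(&io_count, 1);	/* clone 2 mapped (__map_bio) */
	put_io("submitter");		/* dec_pending(ci.io, error) at the end of submission */
	put_io("clone 1 endio");
	put_io("clone 2 endio");	/* completion happens here */
	return 0;
}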
  661. void disable_write_same(struct mapped_device *md)
  662. {
  663. struct queue_limits *limits = dm_get_queue_limits(md);
  664. /* device doesn't really support WRITE SAME, disable it */
  665. limits->max_write_same_sectors = 0;
  666. }
  667. static void clone_endio(struct bio *bio)
  668. {
  669. int error = bio->bi_error;
  670. int r = error;
  671. struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
  672. struct dm_io *io = tio->io;
  673. struct mapped_device *md = tio->io->md;
  674. dm_endio_fn endio = tio->ti->type->end_io;
  675. if (endio) {
  676. r = endio(tio->ti, bio, error);
  677. if (r < 0 || r == DM_ENDIO_REQUEUE)
  678. /*
  679. * error and requeue request are handled
  680. * in dec_pending().
  681. */
  682. error = r;
  683. else if (r == DM_ENDIO_INCOMPLETE)
  684. /* The target will handle the io */
  685. return;
  686. else if (r) {
  687. DMWARN("unimplemented target endio return value: %d", r);
  688. BUG();
  689. }
  690. }
  691. if (unlikely(r == -EREMOTEIO && (bio_op(bio) == REQ_OP_WRITE_SAME) &&
  692. !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
  693. disable_write_same(md);
  694. free_tio(tio);
  695. dec_pending(io, error);
  696. }
  697. /*
  698. * Return maximum size of I/O possible at the supplied sector up to the current
  699. * target boundary.
  700. */
  701. static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
  702. {
  703. sector_t target_offset = dm_target_offset(ti, sector);
  704. return ti->len - target_offset;
  705. }
  706. static sector_t max_io_len(sector_t sector, struct dm_target *ti)
  707. {
  708. sector_t len = max_io_len_target_boundary(sector, ti);
  709. sector_t offset, max_len;
  710. /*
  711. * Does the target need to split even further?
  712. */
  713. if (ti->max_io_len) {
  714. offset = dm_target_offset(ti, sector);
  715. if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
  716. max_len = sector_div(offset, ti->max_io_len);
  717. else
  718. max_len = offset & (ti->max_io_len - 1);
  719. max_len = ti->max_io_len - max_len;
  720. if (len > max_len)
  721. len = max_len;
  722. }
  723. return len;
  724. }
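/*
 * Illustrative sketch of the arithmetic in max_io_len() above: the I/O is
 * first limited to what remains of the target, then (if the target set
 * max_io_len) to the end of the current max_io_len-sized chunk.  Userspace
 * C with invented numbers; a power-of-two max_io_len uses the cheaper mask
 * where the kernel would otherwise call sector_div().
 */
#include <stdio.h>

typedef unsigned long long sector_t;

static sector_t example_max_io_len(sector_t offset_in_target,
				   sector_t target_len, sector_t max_io_len)
{
	sector_t len = target_len - offset_in_target;

	if (max_io_len) {
		sector_t done_in_chunk = (max_io_len & (max_io_len - 1)) ?
			offset_in_target % max_io_len :		/* generic case */
			(offset_in_target & (max_io_len - 1));	/* power of two */
		sector_t max_len = max_io_len - done_in_chunk;

		if (len > max_len)
			len = max_len;
	}
	return len;
}

int main(void)
{
	/* 2048-sector chunks, 70 sectors into a chunk, 10000 sectors left in the target */
	printf("%llu\n", example_max_io_len(70, 10070, 2048));	/* -> 1978 */
	return 0;
}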
  725. int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
  726. {
  727. if (len > UINT_MAX) {
  728. DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
  729. (unsigned long long)len, UINT_MAX);
  730. ti->error = "Maximum size of target IO is too large";
  731. return -EINVAL;
  732. }
  733. ti->max_io_len = (uint32_t) len;
  734. return 0;
  735. }
  736. EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
  737. static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
  738. void **kaddr, pfn_t *pfn, long size)
  739. {
  740. struct mapped_device *md = bdev->bd_disk->private_data;
  741. struct dm_table *map;
  742. struct dm_target *ti;
  743. int srcu_idx;
  744. long len, ret = -EIO;
  745. map = dm_get_live_table(md, &srcu_idx);
  746. if (!map)
  747. goto out;
  748. ti = dm_table_find_target(map, sector);
  749. if (!dm_target_is_valid(ti))
  750. goto out;
  751. len = max_io_len(sector, ti) << SECTOR_SHIFT;
  752. size = min(len, size);
  753. if (ti->type->direct_access)
  754. ret = ti->type->direct_access(ti, sector, kaddr, pfn, size);
  755. out:
  756. dm_put_live_table(md, srcu_idx);
  757. return min(ret, size);
  758. }
  759. /*
  760. * A target may call dm_accept_partial_bio only from the map routine. It is
  761. * allowed for all bio types except REQ_PREFLUSH.
  762. *
  763. * dm_accept_partial_bio informs the dm that the target only wants to process
  764. * additional n_sectors sectors of the bio and the rest of the data should be
  765. * sent in a next bio.
  766. *
   767. * A diagram that explains the arithmetic:
  768. * +--------------------+---------------+-------+
  769. * | 1 | 2 | 3 |
  770. * +--------------------+---------------+-------+
  771. *
  772. * <-------------- *tio->len_ptr --------------->
  773. * <------- bi_size ------->
  774. * <-- n_sectors -->
  775. *
  776. * Region 1 was already iterated over with bio_advance or similar function.
  777. * (it may be empty if the target doesn't use bio_advance)
  778. * Region 2 is the remaining bio size that the target wants to process.
  779. * (it may be empty if region 1 is non-empty, although there is no reason
  780. * to make it empty)
  781. * The target requires that region 3 is to be sent in the next bio.
  782. *
  783. * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
  784. * the partially processed part (the sum of regions 1+2) must be the same for all
  785. * copies of the bio.
  786. */
  787. void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
  788. {
  789. struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
  790. unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
  791. BUG_ON(bio->bi_opf & REQ_PREFLUSH);
  792. BUG_ON(bi_size > *tio->len_ptr);
  793. BUG_ON(n_sectors > bi_size);
  794. *tio->len_ptr -= bi_size - n_sectors;
  795. bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
  796. }
  797. EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
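/*
 * Illustrative sketch of the dm_accept_partial_bio() arithmetic, using the
 * three regions from the diagram above.  Plain userspace C with invented
 * sector counts.
 */
#include <stdio.h>

int main(void)
{
	unsigned len = 100;		/* *tio->len_ptr: regions 1+2+3 */
	unsigned bi_size = 80;		/* remaining bio size: regions 2+3 */
	unsigned n_sectors = 30;	/* sectors the target accepts: region 2 */
	unsigned region3 = bi_size - n_sectors;	/* 50: to be sent in the next bio */

	len -= bi_size - n_sectors;	/* new *tio->len_ptr = regions 1+2 = 50 */
	bi_size = n_sectors;		/* new bi_iter.bi_size (in sectors) = 30 */

	printf("len=%u bi_size=%u next_bio=%u\n", len, bi_size, region3);
	return 0;
}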
  798. static void __map_bio(struct dm_target_io *tio)
  799. {
  800. int r;
  801. sector_t sector;
  802. struct bio *clone = &tio->clone;
  803. struct dm_target *ti = tio->ti;
  804. clone->bi_end_io = clone_endio;
  805. /*
  806. * Map the clone. If r == 0 we don't need to do
   807. * anything; the target has assumed ownership of
  808. * this io.
  809. */
  810. atomic_inc(&tio->io->io_count);
  811. sector = clone->bi_iter.bi_sector;
  812. r = ti->type->map(ti, clone);
  813. if (r == DM_MAPIO_REMAPPED) {
  814. /* the bio has been remapped so dispatch it */
  815. trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
  816. tio->io->bio->bi_bdev->bd_dev, sector);
  817. generic_make_request(clone);
  818. } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
  819. /* error the io and bail out, or requeue it if needed */
  820. dec_pending(tio->io, r);
  821. free_tio(tio);
  822. } else if (r != DM_MAPIO_SUBMITTED) {
  823. DMWARN("unimplemented target map return value: %d", r);
  824. BUG();
  825. }
  826. }
  827. struct clone_info {
  828. struct mapped_device *md;
  829. struct dm_table *map;
  830. struct bio *bio;
  831. struct dm_io *io;
  832. sector_t sector;
  833. unsigned sector_count;
  834. };
  835. static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
  836. {
  837. bio->bi_iter.bi_sector = sector;
  838. bio->bi_iter.bi_size = to_bytes(len);
  839. }
  840. /*
  841. * Creates a bio that consists of range of complete bvecs.
  842. */
  843. static int clone_bio(struct dm_target_io *tio, struct bio *bio,
  844. sector_t sector, unsigned len)
  845. {
  846. struct bio *clone = &tio->clone;
  847. __bio_clone_fast(clone, bio);
  848. if (bio_integrity(bio)) {
  849. int r = bio_integrity_clone(clone, bio, GFP_NOIO);
  850. if (r < 0)
  851. return r;
  852. }
  853. bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
  854. clone->bi_iter.bi_size = to_bytes(len);
  855. if (bio_integrity(bio))
  856. bio_integrity_trim(clone, 0, len);
  857. return 0;
  858. }
  859. static struct dm_target_io *alloc_tio(struct clone_info *ci,
  860. struct dm_target *ti,
  861. unsigned target_bio_nr)
  862. {
  863. struct dm_target_io *tio;
  864. struct bio *clone;
  865. clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
  866. tio = container_of(clone, struct dm_target_io, clone);
  867. tio->io = ci->io;
  868. tio->ti = ti;
  869. tio->target_bio_nr = target_bio_nr;
  870. return tio;
  871. }
  872. static void __clone_and_map_simple_bio(struct clone_info *ci,
  873. struct dm_target *ti,
  874. unsigned target_bio_nr, unsigned *len)
  875. {
  876. struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
  877. struct bio *clone = &tio->clone;
  878. tio->len_ptr = len;
  879. __bio_clone_fast(clone, ci->bio);
  880. if (len)
  881. bio_setup_sector(clone, ci->sector, *len);
  882. __map_bio(tio);
  883. }
  884. static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
  885. unsigned num_bios, unsigned *len)
  886. {
  887. unsigned target_bio_nr;
  888. for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
  889. __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
  890. }
  891. static int __send_empty_flush(struct clone_info *ci)
  892. {
  893. unsigned target_nr = 0;
  894. struct dm_target *ti;
  895. BUG_ON(bio_has_data(ci->bio));
  896. while ((ti = dm_table_get_target(ci->map, target_nr++)))
  897. __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
  898. return 0;
  899. }
  900. static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
  901. sector_t sector, unsigned *len)
  902. {
  903. struct bio *bio = ci->bio;
  904. struct dm_target_io *tio;
  905. unsigned target_bio_nr;
  906. unsigned num_target_bios = 1;
  907. int r = 0;
  908. /*
  909. * Does the target want to receive duplicate copies of the bio?
  910. */
  911. if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
  912. num_target_bios = ti->num_write_bios(ti, bio);
  913. for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
  914. tio = alloc_tio(ci, ti, target_bio_nr);
  915. tio->len_ptr = len;
  916. r = clone_bio(tio, bio, sector, *len);
  917. if (r < 0) {
  918. free_tio(tio);
  919. break;
  920. }
  921. __map_bio(tio);
  922. }
  923. return r;
  924. }
  925. typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
  926. static unsigned get_num_discard_bios(struct dm_target *ti)
  927. {
  928. return ti->num_discard_bios;
  929. }
  930. static unsigned get_num_write_same_bios(struct dm_target *ti)
  931. {
  932. return ti->num_write_same_bios;
  933. }
  934. typedef bool (*is_split_required_fn)(struct dm_target *ti);
  935. static bool is_split_required_for_discard(struct dm_target *ti)
  936. {
  937. return ti->split_discard_bios;
  938. }
  939. static int __send_changing_extent_only(struct clone_info *ci,
  940. get_num_bios_fn get_num_bios,
  941. is_split_required_fn is_split_required)
  942. {
  943. struct dm_target *ti;
  944. unsigned len;
  945. unsigned num_bios;
  946. do {
  947. ti = dm_table_find_target(ci->map, ci->sector);
  948. if (!dm_target_is_valid(ti))
  949. return -EIO;
  950. /*
  951. * Even though the device advertised support for this type of
  952. * request, that does not mean every target supports it, and
  953. * reconfiguration might also have changed that since the
  954. * check was performed.
  955. */
  956. num_bios = get_num_bios ? get_num_bios(ti) : 0;
  957. if (!num_bios)
  958. return -EOPNOTSUPP;
  959. if (is_split_required && !is_split_required(ti))
  960. len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
  961. else
  962. len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
  963. __send_duplicate_bios(ci, ti, num_bios, &len);
  964. ci->sector += len;
  965. } while (ci->sector_count -= len);
  966. return 0;
  967. }
  968. static int __send_discard(struct clone_info *ci)
  969. {
  970. return __send_changing_extent_only(ci, get_num_discard_bios,
  971. is_split_required_for_discard);
  972. }
  973. static int __send_write_same(struct clone_info *ci)
  974. {
  975. return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
  976. }
  977. /*
  978. * Select the correct strategy for processing a non-flush bio.
  979. */
  980. static int __split_and_process_non_flush(struct clone_info *ci)
  981. {
  982. struct bio *bio = ci->bio;
  983. struct dm_target *ti;
  984. unsigned len;
  985. int r;
  986. if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
  987. return __send_discard(ci);
  988. else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
  989. return __send_write_same(ci);
  990. ti = dm_table_find_target(ci->map, ci->sector);
  991. if (!dm_target_is_valid(ti))
  992. return -EIO;
  993. len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
  994. r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
  995. if (r < 0)
  996. return r;
  997. ci->sector += len;
  998. ci->sector_count -= len;
  999. return 0;
  1000. }
  1001. /*
  1002. * Entry point to split a bio into clones and submit them to the targets.
  1003. */
  1004. static void __split_and_process_bio(struct mapped_device *md,
  1005. struct dm_table *map, struct bio *bio)
  1006. {
  1007. struct clone_info ci;
  1008. int error = 0;
  1009. if (unlikely(!map)) {
  1010. bio_io_error(bio);
  1011. return;
  1012. }
  1013. ci.map = map;
  1014. ci.md = md;
  1015. ci.io = alloc_io(md);
  1016. ci.io->error = 0;
  1017. atomic_set(&ci.io->io_count, 1);
  1018. ci.io->bio = bio;
  1019. ci.io->md = md;
  1020. spin_lock_init(&ci.io->endio_lock);
  1021. ci.sector = bio->bi_iter.bi_sector;
  1022. start_io_acct(ci.io);
  1023. if (bio->bi_opf & REQ_PREFLUSH) {
  1024. ci.bio = &ci.md->flush_bio;
  1025. ci.sector_count = 0;
  1026. error = __send_empty_flush(&ci);
  1027. /* dec_pending submits any data associated with flush */
  1028. } else {
  1029. ci.bio = bio;
  1030. ci.sector_count = bio_sectors(bio);
  1031. while (ci.sector_count && !error)
  1032. error = __split_and_process_non_flush(&ci);
  1033. }
  1034. /* drop the extra reference count */
  1035. dec_pending(ci.io, error);
  1036. }
  1037. /*-----------------------------------------------------------------
  1038. * CRUD END
  1039. *---------------------------------------------------------------*/
  1040. /*
  1041. * The request function that just remaps the bio built up by
  1042. * dm_merge_bvec.
  1043. */
  1044. static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
  1045. {
  1046. int rw = bio_data_dir(bio);
  1047. struct mapped_device *md = q->queuedata;
  1048. int srcu_idx;
  1049. struct dm_table *map;
  1050. map = dm_get_live_table(md, &srcu_idx);
  1051. generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
  1052. /* if we're suspended, we have to queue this io for later */
  1053. if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
  1054. dm_put_live_table(md, srcu_idx);
  1055. if (!(bio->bi_opf & REQ_RAHEAD))
  1056. queue_io(md, bio);
  1057. else
  1058. bio_io_error(bio);
  1059. return BLK_QC_T_NONE;
  1060. }
  1061. __split_and_process_bio(md, map, bio);
  1062. dm_put_live_table(md, srcu_idx);
  1063. return BLK_QC_T_NONE;
  1064. }
  1065. static int dm_any_congested(void *congested_data, int bdi_bits)
  1066. {
  1067. int r = bdi_bits;
  1068. struct mapped_device *md = congested_data;
  1069. struct dm_table *map;
  1070. if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
  1071. if (dm_request_based(md)) {
  1072. /*
  1073. * With request-based DM we only need to check the
  1074. * top-level queue for congestion.
  1075. */
  1076. r = md->queue->backing_dev_info.wb.state & bdi_bits;
  1077. } else {
  1078. map = dm_get_live_table_fast(md);
  1079. if (map)
  1080. r = dm_table_any_congested(map, bdi_bits);
  1081. dm_put_live_table_fast(md);
  1082. }
  1083. }
  1084. return r;
  1085. }
  1086. /*-----------------------------------------------------------------
  1087. * An IDR is used to keep track of allocated minor numbers.
  1088. *---------------------------------------------------------------*/
  1089. static void free_minor(int minor)
  1090. {
  1091. spin_lock(&_minor_lock);
  1092. idr_remove(&_minor_idr, minor);
  1093. spin_unlock(&_minor_lock);
  1094. }
  1095. /*
  1096. * See if the device with a specific minor # is free.
  1097. */
  1098. static int specific_minor(int minor)
  1099. {
  1100. int r;
  1101. if (minor >= (1 << MINORBITS))
  1102. return -EINVAL;
  1103. idr_preload(GFP_KERNEL);
  1104. spin_lock(&_minor_lock);
  1105. r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
  1106. spin_unlock(&_minor_lock);
  1107. idr_preload_end();
  1108. if (r < 0)
  1109. return r == -ENOSPC ? -EBUSY : r;
  1110. return 0;
  1111. }
  1112. static int next_free_minor(int *minor)
  1113. {
  1114. int r;
  1115. idr_preload(GFP_KERNEL);
  1116. spin_lock(&_minor_lock);
  1117. r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
  1118. spin_unlock(&_minor_lock);
  1119. idr_preload_end();
  1120. if (r < 0)
  1121. return r;
  1122. *minor = r;
  1123. return 0;
  1124. }
  1125. static const struct block_device_operations dm_blk_dops;
  1126. static void dm_wq_work(struct work_struct *work);
  1127. void dm_init_md_queue(struct mapped_device *md)
  1128. {
  1129. /*
  1130. * Request-based dm devices cannot be stacked on top of bio-based dm
  1131. * devices. The type of this dm device may not have been decided yet.
  1132. * The type is decided at the first table loading time.
  1133. * To prevent problematic device stacking, clear the queue flag
  1134. * for request stacking support until then.
  1135. *
  1136. * This queue is new, so no concurrency on the queue_flags.
  1137. */
  1138. queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
  1139. /*
  1140. * Initialize data that will only be used by a non-blk-mq DM queue
  1141. * - must do so here (in alloc_dev callchain) before queue is used
  1142. */
  1143. md->queue->queuedata = md;
  1144. md->queue->backing_dev_info.congested_data = md;
  1145. }
  1146. void dm_init_normal_md_queue(struct mapped_device *md)
  1147. {
  1148. md->use_blk_mq = false;
  1149. dm_init_md_queue(md);
  1150. /*
  1151. * Initialize aspects of queue that aren't relevant for blk-mq
  1152. */
  1153. md->queue->backing_dev_info.congested_fn = dm_any_congested;
  1154. blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
  1155. }
  1156. static void cleanup_mapped_device(struct mapped_device *md)
  1157. {
  1158. if (md->wq)
  1159. destroy_workqueue(md->wq);
  1160. if (md->kworker_task)
  1161. kthread_stop(md->kworker_task);
  1162. mempool_destroy(md->io_pool);
  1163. mempool_destroy(md->rq_pool);
  1164. if (md->bs)
  1165. bioset_free(md->bs);
  1166. if (md->disk) {
  1167. spin_lock(&_minor_lock);
  1168. md->disk->private_data = NULL;
  1169. spin_unlock(&_minor_lock);
  1170. del_gendisk(md->disk);
  1171. put_disk(md->disk);
  1172. }
  1173. if (md->queue)
  1174. blk_cleanup_queue(md->queue);
  1175. cleanup_srcu_struct(&md->io_barrier);
  1176. if (md->bdev) {
  1177. bdput(md->bdev);
  1178. md->bdev = NULL;
  1179. }
  1180. dm_mq_cleanup_mapped_device(md);
  1181. }
  1182. /*
  1183. * Allocate and initialise a blank device with a given minor.
  1184. */
  1185. static struct mapped_device *alloc_dev(int minor)
  1186. {
  1187. int r, numa_node_id = dm_get_numa_node();
  1188. struct mapped_device *md;
  1189. void *old_md;
  1190. md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
  1191. if (!md) {
  1192. DMWARN("unable to allocate device, out of memory.");
  1193. return NULL;
  1194. }
  1195. if (!try_module_get(THIS_MODULE))
  1196. goto bad_module_get;
  1197. /* get a minor number for the dev */
  1198. if (minor == DM_ANY_MINOR)
  1199. r = next_free_minor(&minor);
  1200. else
  1201. r = specific_minor(minor);
  1202. if (r < 0)
  1203. goto bad_minor;
  1204. r = init_srcu_struct(&md->io_barrier);
  1205. if (r < 0)
  1206. goto bad_io_barrier;
  1207. md->numa_node_id = numa_node_id;
  1208. md->use_blk_mq = dm_use_blk_mq_default();
  1209. md->init_tio_pdu = false;
  1210. md->type = DM_TYPE_NONE;
  1211. mutex_init(&md->suspend_lock);
  1212. mutex_init(&md->type_lock);
  1213. mutex_init(&md->table_devices_lock);
  1214. spin_lock_init(&md->deferred_lock);
  1215. atomic_set(&md->holders, 1);
  1216. atomic_set(&md->open_count, 0);
  1217. atomic_set(&md->event_nr, 0);
  1218. atomic_set(&md->uevent_seq, 0);
  1219. INIT_LIST_HEAD(&md->uevent_list);
  1220. INIT_LIST_HEAD(&md->table_devices);
  1221. spin_lock_init(&md->uevent_lock);
  1222. md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
  1223. if (!md->queue)
  1224. goto bad;
  1225. dm_init_md_queue(md);
  1226. md->disk = alloc_disk_node(1, numa_node_id);
  1227. if (!md->disk)
  1228. goto bad;
  1229. atomic_set(&md->pending[0], 0);
  1230. atomic_set(&md->pending[1], 0);
  1231. init_waitqueue_head(&md->wait);
  1232. INIT_WORK(&md->work, dm_wq_work);
  1233. init_waitqueue_head(&md->eventq);
  1234. init_completion(&md->kobj_holder.completion);
  1235. md->kworker_task = NULL;
  1236. md->disk->major = _major;
  1237. md->disk->first_minor = minor;
  1238. md->disk->fops = &dm_blk_dops;
  1239. md->disk->queue = md->queue;
  1240. md->disk->private_data = md;
  1241. sprintf(md->disk->disk_name, "dm-%d", minor);
  1242. add_disk(md->disk);
  1243. format_dev_t(md->name, MKDEV(_major, minor));
  1244. md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
  1245. if (!md->wq)
  1246. goto bad;
  1247. md->bdev = bdget_disk(md->disk, 0);
  1248. if (!md->bdev)
  1249. goto bad;
  1250. bio_init(&md->flush_bio, NULL, 0);
  1251. md->flush_bio.bi_bdev = md->bdev;
  1252. md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
  1253. dm_stats_init(&md->stats);
  1254. /* Populate the mapping, nobody knows we exist yet */
  1255. spin_lock(&_minor_lock);
  1256. old_md = idr_replace(&_minor_idr, md, minor);
  1257. spin_unlock(&_minor_lock);
  1258. BUG_ON(old_md != MINOR_ALLOCED);
  1259. return md;
  1260. bad:
  1261. cleanup_mapped_device(md);
  1262. bad_io_barrier:
  1263. free_minor(minor);
  1264. bad_minor:
  1265. module_put(THIS_MODULE);
  1266. bad_module_get:
  1267. kfree(md);
  1268. return NULL;
  1269. }
  1270. static void unlock_fs(struct mapped_device *md);
  1271. static void free_dev(struct mapped_device *md)
  1272. {
  1273. int minor = MINOR(disk_devt(md->disk));
  1274. unlock_fs(md);
  1275. cleanup_mapped_device(md);
  1276. free_table_devices(&md->table_devices);
  1277. dm_stats_cleanup(&md->stats);
  1278. free_minor(minor);
  1279. module_put(THIS_MODULE);
  1280. kfree(md);
  1281. }
  1282. static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
  1283. {
  1284. struct dm_md_mempools *p = dm_table_get_md_mempools(t);
  1285. if (md->bs) {
  1286. /* The md already has necessary mempools. */
  1287. if (dm_table_bio_based(t)) {
  1288. /*
  1289. * Reload bioset because front_pad may have changed
  1290. * because a different table was loaded.
  1291. */
  1292. bioset_free(md->bs);
  1293. md->bs = p->bs;
  1294. p->bs = NULL;
  1295. }
  1296. /*
  1297. * There's no need to reload with request-based dm
  1298. * because the size of front_pad doesn't change.
  1299. * Note for future: If you are to reload bioset,
  1300. * prep-ed requests in the queue may refer
  1301. * to bio from the old bioset, so you must walk
  1302. * through the queue to unprep.
  1303. */
  1304. goto out;
  1305. }
  1306. BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
  1307. md->io_pool = p->io_pool;
  1308. p->io_pool = NULL;
  1309. md->rq_pool = p->rq_pool;
  1310. p->rq_pool = NULL;
  1311. md->bs = p->bs;
  1312. p->bs = NULL;
  1313. out:
  1314. /* mempool bind completed, no longer need any mempools in the table */
  1315. dm_table_free_md_mempools(t);
  1316. }
  1317. /*
  1318. * Bind a table to the device.
  1319. */
  1320. static void event_callback(void *context)
  1321. {
  1322. unsigned long flags;
  1323. LIST_HEAD(uevents);
  1324. struct mapped_device *md = (struct mapped_device *) context;
  1325. spin_lock_irqsave(&md->uevent_lock, flags);
  1326. list_splice_init(&md->uevent_list, &uevents);
  1327. spin_unlock_irqrestore(&md->uevent_lock, flags);
  1328. dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
  1329. atomic_inc(&md->event_nr);
  1330. wake_up(&md->eventq);
  1331. }
  1332. /*
  1333. * Protected by md->suspend_lock obtained by dm_swap_table().
  1334. */
  1335. static void __set_size(struct mapped_device *md, sector_t size)
  1336. {
  1337. set_capacity(md->disk, size);
  1338. i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
  1339. }
  1340. /*
  1341. * Returns old map, which caller must destroy.
  1342. */
  1343. static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
  1344. struct queue_limits *limits)
  1345. {
  1346. struct dm_table *old_map;
  1347. struct request_queue *q = md->queue;
  1348. sector_t size;
  1349. lockdep_assert_held(&md->suspend_lock);
  1350. size = dm_table_get_size(t);
  1351. /*
  1352. * Wipe any geometry if the size of the table changed.
  1353. */
  1354. if (size != dm_get_size(md))
  1355. memset(&md->geometry, 0, sizeof(md->geometry));
  1356. __set_size(md, size);
  1357. dm_table_event_callback(t, event_callback, md);
  1358. /*
   1359. * If the old table type wasn't request-based, the queue hasn't been
   1360. * stopped yet during suspension, so stop it now to prevent I/O from
   1361. * being mapped before resume.
  1362. * This must be done before setting the queue restrictions,
  1363. * because request-based dm may be run just after the setting.
  1364. */
  1365. if (dm_table_request_based(t)) {
  1366. dm_stop_queue(q);
  1367. /*
  1368. * Leverage the fact that request-based DM targets are
  1369. * immutable singletons and establish md->immutable_target
  1370. * - used to optimize both dm_request_fn and dm_mq_queue_rq
  1371. */
  1372. md->immutable_target = dm_table_get_immutable_target(t);
  1373. }
  1374. __bind_mempools(md, t);
  1375. old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
  1376. rcu_assign_pointer(md->map, (void *)t);
  1377. md->immutable_target_type = dm_table_get_immutable_target_type(t);
  1378. dm_table_set_restrictions(t, q, limits);
  1379. if (old_map)
  1380. dm_sync_table(md);
  1381. return old_map;
  1382. }
  1383. /*
  1384. * Returns unbound table for the caller to free.
  1385. */
  1386. static struct dm_table *__unbind(struct mapped_device *md)
  1387. {
  1388. struct dm_table *map = rcu_dereference_protected(md->map, 1);
  1389. if (!map)
  1390. return NULL;
  1391. dm_table_event_callback(map, NULL, NULL);
  1392. RCU_INIT_POINTER(md->map, NULL);
  1393. dm_sync_table(md);
  1394. return map;
  1395. }
  1396. /*
  1397. * Constructor for a new device.
  1398. */
  1399. int dm_create(int minor, struct mapped_device **result)
  1400. {
  1401. struct mapped_device *md;
  1402. md = alloc_dev(minor);
  1403. if (!md)
  1404. return -ENXIO;
  1405. dm_sysfs_init(md);
  1406. *result = md;
  1407. return 0;
  1408. }
  1409. /*
  1410. * Functions to manage md->type.
  1411. * All are required to hold md->type_lock.
  1412. */
  1413. void dm_lock_md_type(struct mapped_device *md)
  1414. {
  1415. mutex_lock(&md->type_lock);
  1416. }
  1417. void dm_unlock_md_type(struct mapped_device *md)
  1418. {
  1419. mutex_unlock(&md->type_lock);
  1420. }
  1421. void dm_set_md_type(struct mapped_device *md, unsigned type)
  1422. {
  1423. BUG_ON(!mutex_is_locked(&md->type_lock));
  1424. md->type = type;
  1425. }
  1426. unsigned dm_get_md_type(struct mapped_device *md)
  1427. {
  1428. return md->type;
  1429. }
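
/*
 * Return the immutable target type recorded by __bind(), or NULL if the
 * bound table's targets are not immutable.
 */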
struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));

	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);

/*
 * Set up the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
	int r;
	unsigned type = dm_get_md_type(md);

	switch (type) {
	case DM_TYPE_REQUEST_BASED:
		r = dm_old_init_request_queue(md);
		if (r) {
			DMERR("Cannot initialize queue for request-based mapped device");
			return r;
		}
		break;
	case DM_TYPE_MQ_REQUEST_BASED:
		r = dm_mq_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
			return r;
		}
		break;
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		dm_init_normal_md_queue(md);
		blk_queue_make_request(md->queue, dm_make_request);
		/*
		 * DM handles splitting bios as needed.  Free the bio_split bioset
		 * since it won't be used (saves 1 process per bio-based DM device).
		 */
		bioset_free(md->queue->bio_split);
		md->queue->bio_split = NULL;

		if (type == DM_TYPE_DAX_BIO_BASED)
			queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
		break;
	}

	return 0;
}
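
/*
 * Look up a mapped_device by dev_t and take a reference on it.
 * Returns NULL if the minor does not name a live DM device (e.g. the
 * device is being deleted or freed).
 */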
struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md) {
		if ((md == MINOR_ALLOCED ||
		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
		     dm_deleting_md(md) ||
		     test_bit(DMF_FREEING, &md->flags))) {
			md = NULL;
			goto out;
		}
		dm_get(md);
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);
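
/*
 * interface_ptr is opaque per-device storage for the ioctl interface;
 * dm core only stores and returns it.
 */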
void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}
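
/*
 * dm_get() assumes the caller already guarantees the device is alive
 * (it BUG()s on DMF_FREEING); dm_hold() takes _minor_lock so it can
 * safely refuse with -EBUSY a device that is already being freed.
 */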
void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
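
/*
 * Common teardown: mark the device DMF_FREEING, kill its queue, run the
 * target pre/postsuspend hooks if needed, then unbind the table and free
 * the device.  If @wait is true, block until all holders have dropped
 * their references.
 */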
static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct request_queue *q = dm_get_md_queue(md);
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	blk_set_queue_dying(q);

	if (dm_request_based(md) && md->kworker_task)
		kthread_flush_worker(&md->kworker);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
	mutex_unlock(&md->suspend_lock);

	/*
	 * Rare, but I/O requests may still be completing at this point.
	 * Wait for all references to disappear; no one may take a new
	 * reference on the mapped_device once it is in the DMF_FREEING state.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}
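
/*
 * Drop a reference taken with dm_get()/dm_hold().  Final teardown is
 * driven by __dm_destroy(), which (in the waiting variant) blocks until
 * the holder count reaches zero.
 */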
void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);
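
/*
 * Sleep until all I/O counted by md_in_flight() has completed.
 * Returns -EINTR if @task_state is interruptible and a signal arrives.
 */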
static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&md->wait, &wait, task_state);

		if (!md_in_flight(md))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	finish_wait(&md->wait, &wait);

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			generic_make_request(c);
		else
			__split_and_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}
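
/*
 * Allow bio submission again and kick dm_wq_work() so bios deferred while
 * DMF_BLOCK_IO_FOR_SUSPEND was set get resubmitted.
 */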
static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 *
 * Caller must hold md->suspend_lock
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, long task_state,
			int dmf_suspended_flag)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	lockdep_assert_held(&md->suspend_lock);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md)) {
		dm_stop_queue(md->queue);
		if (md->kworker_task)
			kthread_flush_worker(&md->kworker);
	}

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, task_state);
	if (!r)
		set_bit(dmf_suspended_flag, &md->flags);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			dm_start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
		goto out_unlock;

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
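
/*
 * Undo __dm_suspend(): resume the targets, requeue any deferred I/O and
 * restart the request_queue if needed, then thaw the filesystem.
 */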
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

int dm_resume(struct mapped_device *md)
{
	int r;
	struct dm_table *map = NULL;

retry:
	r = -EINVAL;
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */
static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);

	dm_table_postsuspend_targets(map);
}

static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);

/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
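
/*
 * If userspace passed a cookie with the ioctl, export it in the uevent
 * environment (DM_COOKIE_ENV_VAR_NAME) so udev processing can be
 * synchronized with the tool that issued the ioctl.
 */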
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}
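
/*
 * Map a sysfs kobject back to its mapped_device and take a reference,
 * refusing devices that are being deleted or freed.
 */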
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
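
/*
 * front_pad reserves per-clone space in front of each bio allocated from
 * the bioset: bio-based tables need struct dm_target_io plus the target's
 * per_io_data_size; request-based tables only need struct dm_rq_clone_bio_info.
 */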
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
					    unsigned integrity, unsigned per_io_data_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	struct kmem_cache *cachep = NULL;
	unsigned int pool_size = 0;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		cachep = _io_cache;
		pool_size = dm_get_reserved_bio_based_ios();
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
		break;
	case DM_TYPE_REQUEST_BASED:
		cachep = _rq_tio_cache;
		pool_size = dm_get_reserved_rq_based_ios();
		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
		if (!pools->rq_pool)
			goto out;
		/* fall through to set up remaining rq-based pools */
	case DM_TYPE_MQ_REQUEST_BASED:
		if (!pool_size)
			pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}

	if (cachep) {
		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
		if (!pools->io_pool)
			goto out;
	}

	pools->bs = bioset_create_nobvec(pool_size, front_pad);
	if (!pools->bs)
		goto out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	mempool_destroy(pools->io_pool);
	mempool_destroy(pools->rq_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}

struct dm_pr {
	u64	old_key;
	u64	new_key;
	u32	flags;
	bool	fail_early;
};
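
/*
 * Run a persistent-reservation callout across the device's paths via the
 * single target's iterate_devices method.
 */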
static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);

	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}

static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key	= old_key,
		.new_key	= new_key,
		.flags		= flags,
		.fail_early	= true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
}
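
/*
 * The remaining PR operations only need to reach one path, so they grab
 * the underlying block device with dm_grab_bdev_for_ioctl() and forward
 * the call to its pr_ops.
 */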
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}

static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}

static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}

static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}

static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.direct_access = dm_blk_direct_access,
	.getgeo = dm_blk_getgeo,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");