  1. /*
  2. * Copyright (C) 2003 Sistina Software Limited.
  3. * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
  4. *
  5. * This file is released under the GPL.
  6. */
  7. #include <linux/device-mapper.h>
  8. #include "dm-rq.h"
  9. #include "dm-bio-record.h"
  10. #include "dm-path-selector.h"
  11. #include "dm-uevent.h"
  12. #include <linux/blkdev.h>
  13. #include <linux/ctype.h>
  14. #include <linux/init.h>
  15. #include <linux/mempool.h>
  16. #include <linux/module.h>
  17. #include <linux/pagemap.h>
  18. #include <linux/slab.h>
  19. #include <linux/time.h>
  20. #include <linux/workqueue.h>
  21. #include <linux/delay.h>
  22. #include <scsi/scsi_dh.h>
  23. #include <linux/atomic.h>
  24. #include <linux/blk-mq.h>
  25. #define DM_MSG_PREFIX "multipath"
  26. #define DM_PG_INIT_DELAY_MSECS 2000
  27. #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
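/*
 * DM_PG_INIT_DELAY_DEFAULT is a sentinel: unless the table overrides
 * pg_init_delay_msecs, a delayed pg_init retry falls back to
 * DM_PG_INIT_DELAY_MSECS (2000 ms), as in __pg_init_all_paths() below:
 *
 *	pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs !=
 *			DM_PG_INIT_DELAY_DEFAULT ?
 *			m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 */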
  28. /* Path properties */
  29. struct pgpath {
  30. struct list_head list;
  31. struct priority_group *pg; /* Owning PG */
  32. unsigned fail_count; /* Cumulative failure count */
  33. struct dm_path path;
  34. struct delayed_work activate_path;
  35. bool is_active:1; /* Path status */
  36. };
  37. #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
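/*
 * A path selector hands back a struct dm_path *; because 'path' is embedded
 * in struct pgpath, the owner is recovered with container_of().  An
 * illustrative sketch of the pattern (the real call site is
 * choose_path_in_pg() below):
 *
 *	struct dm_path *path = pg->ps.type->select_path(&pg->ps, nr_bytes);
 *	struct pgpath *pgpath = path_to_pgpath(path);
 */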
  38. /*
  39. * Paths are grouped into Priority Groups and numbered from 1 upwards.
  40. * Each has a path selector which controls which path gets used.
  41. */
  42. struct priority_group {
  43. struct list_head list;
  44. struct multipath *m; /* Owning multipath instance */
  45. struct path_selector ps;
  46. unsigned pg_num; /* Reference number */
  47. unsigned nr_pgpaths; /* Number of paths in PG */
  48. struct list_head pgpaths;
  49. bool bypassed:1; /* Temporarily bypass this PG? */
  50. };
  51. /* Multipath context */
  52. struct multipath {
  53. struct list_head list;
  54. struct dm_target *ti;
  55. const char *hw_handler_name;
  56. char *hw_handler_params;
  57. spinlock_t lock;
  58. unsigned nr_priority_groups;
  59. struct list_head priority_groups;
  60. wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
  61. struct pgpath *current_pgpath;
  62. struct priority_group *current_pg;
  63. struct priority_group *next_pg; /* Switch to this PG if set */
  64. unsigned long flags; /* Multipath state flags */
  65. unsigned pg_init_retries; /* Number of times to retry pg_init */
  66. unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
  67. atomic_t nr_valid_paths; /* Total number of usable paths */
  68. atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
  69. atomic_t pg_init_count; /* Number of times pg_init called */
  70. enum dm_queue_mode queue_mode;
  71. struct mutex work_mutex;
  72. struct work_struct trigger_event;
  73. struct work_struct process_queued_bios;
  74. struct bio_list queued_bios;
  75. };
  76. /*
  77. * Context information attached to each io we process.
  78. */
  79. struct dm_mpath_io {
  80. struct pgpath *pgpath;
  81. size_t nr_bytes;
  82. };
  83. typedef int (*action_fn) (struct pgpath *pgpath);
  84. static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
  85. static void trigger_event(struct work_struct *work);
  86. static void activate_or_offline_path(struct pgpath *pgpath);
  87. static void activate_path_work(struct work_struct *work);
  88. static void process_queued_bios(struct work_struct *work);
  89. /*-----------------------------------------------
  90. * Multipath state flags.
  91. *-----------------------------------------------*/
  92. #define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */
  93. #define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */
  94. #define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */
  95. #define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3 /* If there's already a hw_handler present, don't change it. */
  96. #define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */
  97. #define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
  98. #define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
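/*
 * These values are bit numbers within m->flags and are manipulated with the
 * atomic bitops, for example:
 *
 *	set_bit(MPATHF_QUEUE_IO, &m->flags);
 *	if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 *		queue the I/O instead of failing it;
 *	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 */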
  99. /*-----------------------------------------------
  100. * Allocation routines
  101. *-----------------------------------------------*/
  102. static struct pgpath *alloc_pgpath(void)
  103. {
  104. struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
  105. if (pgpath) {
  106. pgpath->is_active = true;
  107. INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
  108. }
  109. return pgpath;
  110. }
  111. static void free_pgpath(struct pgpath *pgpath)
  112. {
  113. kfree(pgpath);
  114. }
  115. static struct priority_group *alloc_priority_group(void)
  116. {
  117. struct priority_group *pg;
  118. pg = kzalloc(sizeof(*pg), GFP_KERNEL);
  119. if (pg)
  120. INIT_LIST_HEAD(&pg->pgpaths);
  121. return pg;
  122. }
  123. static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
  124. {
  125. struct pgpath *pgpath, *tmp;
  126. list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
  127. list_del(&pgpath->list);
  128. dm_put_device(ti, pgpath->path.dev);
  129. free_pgpath(pgpath);
  130. }
  131. }
  132. static void free_priority_group(struct priority_group *pg,
  133. struct dm_target *ti)
  134. {
  135. struct path_selector *ps = &pg->ps;
  136. if (ps->type) {
  137. ps->type->destroy(ps);
  138. dm_put_path_selector(ps->type);
  139. }
  140. free_pgpaths(&pg->pgpaths, ti);
  141. kfree(pg);
  142. }
  143. static struct multipath *alloc_multipath(struct dm_target *ti)
  144. {
  145. struct multipath *m;
  146. m = kzalloc(sizeof(*m), GFP_KERNEL);
  147. if (m) {
  148. INIT_LIST_HEAD(&m->priority_groups);
  149. spin_lock_init(&m->lock);
  150. set_bit(MPATHF_QUEUE_IO, &m->flags);
  151. atomic_set(&m->nr_valid_paths, 0);
  152. atomic_set(&m->pg_init_in_progress, 0);
  153. atomic_set(&m->pg_init_count, 0);
  154. m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
  155. INIT_WORK(&m->trigger_event, trigger_event);
  156. init_waitqueue_head(&m->pg_init_wait);
  157. mutex_init(&m->work_mutex);
  158. m->queue_mode = DM_TYPE_NONE;
  159. m->ti = ti;
  160. ti->private = m;
  161. }
  162. return m;
  163. }
  164. static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
  165. {
  166. if (m->queue_mode == DM_TYPE_NONE) {
  167. /*
  168. * Default to request-based.
  169. */
  170. if (dm_use_blk_mq(dm_table_get_md(ti->table)))
  171. m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
  172. else
  173. m->queue_mode = DM_TYPE_REQUEST_BASED;
  174. } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
  175. INIT_WORK(&m->process_queued_bios, process_queued_bios);
  176. /*
  177. * bio-based doesn't support any direct scsi_dh management;
  178. * it just discovers if a scsi_dh is attached.
  179. */
  180. set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
  181. }
  182. dm_table_set_type(ti->table, m->queue_mode);
  183. return 0;
  184. }
  185. static void free_multipath(struct multipath *m)
  186. {
  187. struct priority_group *pg, *tmp;
  188. list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
  189. list_del(&pg->list);
  190. free_priority_group(pg, m->ti);
  191. }
  192. kfree(m->hw_handler_name);
  193. kfree(m->hw_handler_params);
  194. kfree(m);
  195. }
  196. static struct dm_mpath_io *get_mpio(union map_info *info)
  197. {
  198. return info->ptr;
  199. }
  200. static size_t multipath_per_bio_data_size(void)
  201. {
  202. return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
  203. }
  204. static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
  205. {
  206. return dm_per_bio_data(bio, multipath_per_bio_data_size());
  207. }
  208. static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
  209. {
  210. /* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
  211. struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
  212. void *bio_details = mpio + 1;
  213. return bio_details;
  214. }
  215. static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
  216. struct dm_bio_details **bio_details_p)
  217. {
  218. struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
  219. struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);
  220. memset(mpio, 0, sizeof(*mpio));
  221. memset(bio_details, 0, sizeof(*bio_details));
  222. dm_bio_record(bio_details, bio);
  223. if (mpio_p)
  224. *mpio_p = mpio;
  225. if (bio_details_p)
  226. *bio_details_p = bio_details;
  227. }
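/*
 * Layout of the per-bio data used by the helpers above (bio-based mode):
 * the dm_mpath_io sits first and the dm_bio_details follows immediately,
 * which is why get_bio_details_from_bio() simply returns mpio + 1 and
 * multipath_per_bio_data_size() is the sum of the two struct sizes.
 *
 *	| struct dm_mpath_io | struct dm_bio_details |
 *	^ dm_per_bio_data()  ^ mpio + 1
 */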
  228. /*-----------------------------------------------
  229. * Path selection
  230. *-----------------------------------------------*/
  231. static int __pg_init_all_paths(struct multipath *m)
  232. {
  233. struct pgpath *pgpath;
  234. unsigned long pg_init_delay = 0;
  235. lockdep_assert_held(&m->lock);
  236. if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
  237. return 0;
  238. atomic_inc(&m->pg_init_count);
  239. clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
  240. /* Check here to reset pg_init_required */
  241. if (!m->current_pg)
  242. return 0;
  243. if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
  244. pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
  245. m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
  246. list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
  247. /* Skip failed paths */
  248. if (!pgpath->is_active)
  249. continue;
  250. if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
  251. pg_init_delay))
  252. atomic_inc(&m->pg_init_in_progress);
  253. }
  254. return atomic_read(&m->pg_init_in_progress);
  255. }
  256. static int pg_init_all_paths(struct multipath *m)
  257. {
  258. int ret;
  259. unsigned long flags;
  260. spin_lock_irqsave(&m->lock, flags);
  261. ret = __pg_init_all_paths(m);
  262. spin_unlock_irqrestore(&m->lock, flags);
  263. return ret;
  264. }
  265. static void __switch_pg(struct multipath *m, struct priority_group *pg)
  266. {
  267. m->current_pg = pg;
  268. /* Must we initialise the PG first, and queue I/O till it's ready? */
  269. if (m->hw_handler_name) {
  270. set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
  271. set_bit(MPATHF_QUEUE_IO, &m->flags);
  272. } else {
  273. clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
  274. clear_bit(MPATHF_QUEUE_IO, &m->flags);
  275. }
  276. atomic_set(&m->pg_init_count, 0);
  277. }
  278. static struct pgpath *choose_path_in_pg(struct multipath *m,
  279. struct priority_group *pg,
  280. size_t nr_bytes)
  281. {
  282. unsigned long flags;
  283. struct dm_path *path;
  284. struct pgpath *pgpath;
  285. path = pg->ps.type->select_path(&pg->ps, nr_bytes);
  286. if (!path)
  287. return ERR_PTR(-ENXIO);
  288. pgpath = path_to_pgpath(path);
  289. if (unlikely(lockless_dereference(m->current_pg) != pg)) {
  290. /* Only update current_pgpath if pg changed */
  291. spin_lock_irqsave(&m->lock, flags);
  292. m->current_pgpath = pgpath;
  293. __switch_pg(m, pg);
  294. spin_unlock_irqrestore(&m->lock, flags);
  295. }
  296. return pgpath;
  297. }
  298. static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
  299. {
  300. unsigned long flags;
  301. struct priority_group *pg;
  302. struct pgpath *pgpath;
  303. unsigned bypassed = 1;
  304. if (!atomic_read(&m->nr_valid_paths)) {
  305. clear_bit(MPATHF_QUEUE_IO, &m->flags);
  306. goto failed;
  307. }
  308. /* Were we instructed to switch PG? */
  309. if (lockless_dereference(m->next_pg)) {
  310. spin_lock_irqsave(&m->lock, flags);
  311. pg = m->next_pg;
  312. if (!pg) {
  313. spin_unlock_irqrestore(&m->lock, flags);
  314. goto check_current_pg;
  315. }
  316. m->next_pg = NULL;
  317. spin_unlock_irqrestore(&m->lock, flags);
  318. pgpath = choose_path_in_pg(m, pg, nr_bytes);
  319. if (!IS_ERR_OR_NULL(pgpath))
  320. return pgpath;
  321. }
  322. /* Don't change PG until it has no remaining paths */
  323. check_current_pg:
  324. pg = lockless_dereference(m->current_pg);
  325. if (pg) {
  326. pgpath = choose_path_in_pg(m, pg, nr_bytes);
  327. if (!IS_ERR_OR_NULL(pgpath))
  328. return pgpath;
  329. }
  330. /*
  331. * Loop through priority groups until we find a valid path.
  332. * First time we skip PGs marked 'bypassed'.
  333. * Second time we only try the ones we skipped, but set
  334. * pg_init_delay_retry so we do not hammer controllers.
  335. */
  336. do {
  337. list_for_each_entry(pg, &m->priority_groups, list) {
  338. if (pg->bypassed == !!bypassed)
  339. continue;
  340. pgpath = choose_path_in_pg(m, pg, nr_bytes);
  341. if (!IS_ERR_OR_NULL(pgpath)) {
  342. if (!bypassed)
  343. set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
  344. return pgpath;
  345. }
  346. }
  347. } while (bypassed--);
  348. failed:
  349. spin_lock_irqsave(&m->lock, flags);
  350. m->current_pgpath = NULL;
  351. m->current_pg = NULL;
  352. spin_unlock_irqrestore(&m->lock, flags);
  353. return NULL;
  354. }
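/*
 * Summary of the selection order implemented by choose_pgpath():
 *
 *	1. honour a pending switch_group request (m->next_pg);
 *	2. keep using m->current_pg while it still yields a path;
 *	3. scan all priority groups, skipping bypassed ones;
 *	4. finally try the bypassed groups, setting MPATHF_PG_INIT_DELAY_RETRY
 *	   so repeated pg_init attempts do not hammer the controllers.
 */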
  355. /*
  356. * dm_report_EIO() is a macro instead of a function to make pr_debug()
  357. * report the function name and line number of the function from which
  358. * it has been invoked.
  359. */
  360. #define dm_report_EIO(m) \
  361. do { \
  362. struct mapped_device *md = dm_table_get_md((m)->ti->table); \
  363. \
  364. pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
  365. dm_device_name(md), \
  366. test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
  367. test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
  368. dm_noflush_suspending((m)->ti)); \
  369. } while (0)
  370. /*
  371. * Map cloned requests (request-based multipath)
  372. */
  373. static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
  374. union map_info *map_context,
  375. struct request **__clone)
  376. {
  377. struct multipath *m = ti->private;
  378. size_t nr_bytes = blk_rq_bytes(rq);
  379. struct pgpath *pgpath;
  380. struct block_device *bdev;
  381. struct dm_mpath_io *mpio = get_mpio(map_context);
  382. struct request_queue *q;
  383. struct request *clone;
  384. /* Do we need to select a new pgpath? */
  385. pgpath = lockless_dereference(m->current_pgpath);
  386. if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
  387. pgpath = choose_pgpath(m, nr_bytes);
  388. if (!pgpath) {
  389. if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
  390. return DM_MAPIO_DELAY_REQUEUE;
  391. dm_report_EIO(m); /* Failed */
  392. return DM_MAPIO_KILL;
  393. } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
  394. test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
  395. if (pg_init_all_paths(m))
  396. return DM_MAPIO_DELAY_REQUEUE;
  397. return DM_MAPIO_REQUEUE;
  398. }
  399. memset(mpio, 0, sizeof(*mpio));
  400. mpio->pgpath = pgpath;
  401. mpio->nr_bytes = nr_bytes;
  402. bdev = pgpath->path.dev->bdev;
  403. q = bdev_get_queue(bdev);
  404. clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
  405. if (IS_ERR(clone)) {
  406. /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
  407. bool queue_dying = blk_queue_dying(q);
  408. if (queue_dying) {
  409. atomic_inc(&m->pg_init_in_progress);
  410. activate_or_offline_path(pgpath);
  411. }
  412. return DM_MAPIO_DELAY_REQUEUE;
  413. }
  414. clone->bio = clone->biotail = NULL;
  415. clone->rq_disk = bdev->bd_disk;
  416. clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
  417. *__clone = clone;
  418. if (pgpath->pg->ps.type->start_io)
  419. pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
  420. &pgpath->path,
  421. nr_bytes);
  422. return DM_MAPIO_REMAPPED;
  423. }
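/*
 * Return values from multipath_clone_and_map(), as interpreted by dm-rq:
 * DM_MAPIO_REMAPPED dispatches *__clone to the chosen path,
 * DM_MAPIO_REQUEUE and DM_MAPIO_DELAY_REQUEUE put the original request back
 * on the queue (the latter after a delay), and DM_MAPIO_KILL fails it.
 */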
  424. static void multipath_release_clone(struct request *clone)
  425. {
  426. blk_put_request(clone);
  427. }
  428. /*
  429. * Map cloned bios (bio-based multipath)
  430. */
  431. static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
  432. {
  433. size_t nr_bytes = bio->bi_iter.bi_size;
  434. struct pgpath *pgpath;
  435. unsigned long flags;
  436. bool queue_io;
  437. /* Do we need to select a new pgpath? */
  438. pgpath = lockless_dereference(m->current_pgpath);
  439. queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
  440. if (!pgpath || !queue_io)
  441. pgpath = choose_pgpath(m, nr_bytes);
  442. if ((pgpath && queue_io) ||
  443. (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
  444. /* Queue for the daemon to resubmit */
  445. spin_lock_irqsave(&m->lock, flags);
  446. bio_list_add(&m->queued_bios, bio);
  447. spin_unlock_irqrestore(&m->lock, flags);
  448. /* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
  449. if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
  450. pg_init_all_paths(m);
  451. else if (!queue_io)
  452. queue_work(kmultipathd, &m->process_queued_bios);
  453. return DM_MAPIO_SUBMITTED;
  454. }
  455. if (!pgpath) {
  456. if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
  457. return DM_MAPIO_REQUEUE;
  458. dm_report_EIO(m);
  459. return DM_MAPIO_KILL;
  460. }
  461. mpio->pgpath = pgpath;
  462. mpio->nr_bytes = nr_bytes;
  463. bio->bi_status = 0;
  464. bio_set_dev(bio, pgpath->path.dev->bdev);
  465. bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
  466. if (pgpath->pg->ps.type->start_io)
  467. pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
  468. &pgpath->path,
  469. nr_bytes);
  470. return DM_MAPIO_REMAPPED;
  471. }
  472. static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
  473. {
  474. struct multipath *m = ti->private;
  475. struct dm_mpath_io *mpio = NULL;
  476. multipath_init_per_bio_data(bio, &mpio, NULL);
  477. return __multipath_map_bio(m, bio, mpio);
  478. }
  479. static void process_queued_io_list(struct multipath *m)
  480. {
  481. if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
  482. dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
  483. else if (m->queue_mode == DM_TYPE_BIO_BASED)
  484. queue_work(kmultipathd, &m->process_queued_bios);
  485. }
  486. static void process_queued_bios(struct work_struct *work)
  487. {
  488. int r;
  489. unsigned long flags;
  490. struct bio *bio;
  491. struct bio_list bios;
  492. struct blk_plug plug;
  493. struct multipath *m =
  494. container_of(work, struct multipath, process_queued_bios);
  495. bio_list_init(&bios);
  496. spin_lock_irqsave(&m->lock, flags);
  497. if (bio_list_empty(&m->queued_bios)) {
  498. spin_unlock_irqrestore(&m->lock, flags);
  499. return;
  500. }
  501. bio_list_merge(&bios, &m->queued_bios);
  502. bio_list_init(&m->queued_bios);
  503. spin_unlock_irqrestore(&m->lock, flags);
  504. blk_start_plug(&plug);
  505. while ((bio = bio_list_pop(&bios))) {
  506. r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
  507. switch (r) {
  508. case DM_MAPIO_KILL:
  509. bio->bi_status = BLK_STS_IOERR;
  510. bio_endio(bio);
  511. break;
  512. case DM_MAPIO_REQUEUE:
  513. bio->bi_status = BLK_STS_DM_REQUEUE;
  514. bio_endio(bio);
  515. break;
  516. case DM_MAPIO_REMAPPED:
  517. generic_make_request(bio);
  518. break;
  519. case 0:
  520. break;
  521. default:
  522. WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
  523. }
  524. }
  525. blk_finish_plug(&plug);
  526. }
  527. static void assign_bit(bool value, long nr, unsigned long *addr)
  528. {
  529. if (value)
  530. set_bit(nr, addr);
  531. else
  532. clear_bit(nr, addr);
  533. }
  534. /*
  535. * If we run out of usable paths, should we queue I/O or error it?
  536. */
  537. static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
  538. bool save_old_value)
  539. {
  540. unsigned long flags;
  541. spin_lock_irqsave(&m->lock, flags);
  542. assign_bit((save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
  543. (!save_old_value && queue_if_no_path),
  544. MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
  545. assign_bit(queue_if_no_path || dm_noflush_suspending(m->ti),
  546. MPATHF_QUEUE_IF_NO_PATH, &m->flags);
  547. spin_unlock_irqrestore(&m->lock, flags);
  548. if (!queue_if_no_path) {
  549. dm_table_run_md_queue_async(m->ti->table);
  550. process_queued_io_list(m);
  551. }
  552. return 0;
  553. }
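/*
 * Besides the "queue_if_no_path" feature arg parsed in parse_features(),
 * this setting is toggled at runtime through the message interface (see
 * multipath_message() below), e.g. with a hypothetical device name:
 *
 *	dmsetup message mpathX 0 queue_if_no_path
 *	dmsetup message mpathX 0 fail_if_no_path
 */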
  554. /*
  555. * An event is triggered whenever a path is taken out of use.
  556. * Includes path failure and PG bypass.
  557. */
  558. static void trigger_event(struct work_struct *work)
  559. {
  560. struct multipath *m =
  561. container_of(work, struct multipath, trigger_event);
  562. dm_table_event(m->ti->table);
  563. }
  564. /*-----------------------------------------------------------------
  565. * Constructor/argument parsing:
  566. * <#multipath feature args> [<arg>]*
  567. * <#hw_handler args> [hw_handler [<arg>]*]
  568. * <#priority groups>
  569. * <initial priority group>
  570. * [<selector> <#selector args> [<arg>]*
  571. * <#paths> <#per-path selector args>
  572. * [<path> [<arg>]* ]+ ]+
  573. *---------------------------------------------------------------*/
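/*
 * A hypothetical table line following the format above: no feature args, no
 * hardware handler, a single priority group using round-robin with two
 * paths and one per-path selector arg (repeat count):
 *
 *	0 10240 multipath 0 0 1 1 round-robin 0 2 1 8:16 100 8:32 100
 *
 * i.e. 0 features, 0 hw handler args, 1 group, initial group 1, selector
 * "round-robin" with 0 selector args, 2 paths with 1 path arg each.
 */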
  574. static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
  575. struct dm_target *ti)
  576. {
  577. int r;
  578. struct path_selector_type *pst;
  579. unsigned ps_argc;
  580. static const struct dm_arg _args[] = {
  581. {0, 1024, "invalid number of path selector args"},
  582. };
  583. pst = dm_get_path_selector(dm_shift_arg(as));
  584. if (!pst) {
  585. ti->error = "unknown path selector type";
  586. return -EINVAL;
  587. }
  588. r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
  589. if (r) {
  590. dm_put_path_selector(pst);
  591. return -EINVAL;
  592. }
  593. r = pst->create(&pg->ps, ps_argc, as->argv);
  594. if (r) {
  595. dm_put_path_selector(pst);
  596. ti->error = "path selector constructor failed";
  597. return r;
  598. }
  599. pg->ps.type = pst;
  600. dm_consume_args(as, ps_argc);
  601. return 0;
  602. }
  603. static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
  604. struct dm_target *ti)
  605. {
  606. int r;
  607. struct pgpath *p;
  608. struct multipath *m = ti->private;
  609. struct request_queue *q = NULL;
  610. const char *attached_handler_name;
  611. /* we need at least a path arg */
  612. if (as->argc < 1) {
  613. ti->error = "no device given";
  614. return ERR_PTR(-EINVAL);
  615. }
  616. p = alloc_pgpath();
  617. if (!p)
  618. return ERR_PTR(-ENOMEM);
  619. r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
  620. &p->path.dev);
  621. if (r) {
  622. ti->error = "error getting device";
  623. goto bad;
  624. }
  625. if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
  626. q = bdev_get_queue(p->path.dev->bdev);
  627. if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
  628. retain:
  629. attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
  630. if (attached_handler_name) {
  631. /*
  632. * Clear any hw_handler_params associated with a
  633. * handler that isn't already attached.
  634. */
  635. if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
  636. kfree(m->hw_handler_params);
  637. m->hw_handler_params = NULL;
  638. }
  639. /*
  640. * Reset hw_handler_name to match the attached handler
  641. *
  642. * NB. This modifies the table line to show the actual
  643. * handler instead of the original table passed in.
  644. */
  645. kfree(m->hw_handler_name);
  646. m->hw_handler_name = attached_handler_name;
  647. }
  648. }
  649. if (m->hw_handler_name) {
  650. r = scsi_dh_attach(q, m->hw_handler_name);
  651. if (r == -EBUSY) {
  652. char b[BDEVNAME_SIZE];
  653. printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
  654. bdevname(p->path.dev->bdev, b));
  655. goto retain;
  656. }
  657. if (r < 0) {
  658. ti->error = "error attaching hardware handler";
  659. dm_put_device(ti, p->path.dev);
  660. goto bad;
  661. }
  662. if (m->hw_handler_params) {
  663. r = scsi_dh_set_params(q, m->hw_handler_params);
  664. if (r < 0) {
  665. ti->error = "unable to set hardware "
  666. "handler parameters";
  667. dm_put_device(ti, p->path.dev);
  668. goto bad;
  669. }
  670. }
  671. }
  672. r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
  673. if (r) {
  674. dm_put_device(ti, p->path.dev);
  675. goto bad;
  676. }
  677. return p;
  678. bad:
  679. free_pgpath(p);
  680. return ERR_PTR(r);
  681. }
  682. static struct priority_group *parse_priority_group(struct dm_arg_set *as,
  683. struct multipath *m)
  684. {
  685. static const struct dm_arg _args[] = {
  686. {1, 1024, "invalid number of paths"},
  687. {0, 1024, "invalid number of selector args"}
  688. };
  689. int r;
  690. unsigned i, nr_selector_args, nr_args;
  691. struct priority_group *pg;
  692. struct dm_target *ti = m->ti;
  693. if (as->argc < 2) {
  694. as->argc = 0;
  695. ti->error = "not enough priority group arguments";
  696. return ERR_PTR(-EINVAL);
  697. }
  698. pg = alloc_priority_group();
  699. if (!pg) {
  700. ti->error = "couldn't allocate priority group";
  701. return ERR_PTR(-ENOMEM);
  702. }
  703. pg->m = m;
  704. r = parse_path_selector(as, pg, ti);
  705. if (r)
  706. goto bad;
  707. /*
  708. * read the paths
  709. */
  710. r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
  711. if (r)
  712. goto bad;
  713. r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
  714. if (r)
  715. goto bad;
  716. nr_args = 1 + nr_selector_args;
  717. for (i = 0; i < pg->nr_pgpaths; i++) {
  718. struct pgpath *pgpath;
  719. struct dm_arg_set path_args;
  720. if (as->argc < nr_args) {
  721. ti->error = "not enough path parameters";
  722. r = -EINVAL;
  723. goto bad;
  724. }
  725. path_args.argc = nr_args;
  726. path_args.argv = as->argv;
  727. pgpath = parse_path(&path_args, &pg->ps, ti);
  728. if (IS_ERR(pgpath)) {
  729. r = PTR_ERR(pgpath);
  730. goto bad;
  731. }
  732. pgpath->pg = pg;
  733. list_add_tail(&pgpath->list, &pg->pgpaths);
  734. dm_consume_args(as, nr_args);
  735. }
  736. return pg;
  737. bad:
  738. free_priority_group(pg, ti);
  739. return ERR_PTR(r);
  740. }
  741. static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
  742. {
  743. unsigned hw_argc;
  744. int ret;
  745. struct dm_target *ti = m->ti;
  746. static const struct dm_arg _args[] = {
  747. {0, 1024, "invalid number of hardware handler args"},
  748. };
  749. if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
  750. return -EINVAL;
  751. if (!hw_argc)
  752. return 0;
  753. if (m->queue_mode == DM_TYPE_BIO_BASED) {
  754. dm_consume_args(as, hw_argc);
  755. DMERR("bio-based multipath doesn't allow hardware handler args");
  756. return 0;
  757. }
  758. m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
  759. if (!m->hw_handler_name)
  760. return -EINVAL;
  761. if (hw_argc > 1) {
  762. char *p;
  763. int i, j, len = 4;
  764. for (i = 0; i <= hw_argc - 2; i++)
  765. len += strlen(as->argv[i]) + 1;
  766. p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
  767. if (!p) {
  768. ti->error = "memory allocation failed";
  769. ret = -ENOMEM;
  770. goto fail;
  771. }
  772. j = sprintf(p, "%d", hw_argc - 1);
  773. for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
  774. j = sprintf(p, "%s", as->argv[i]);
  775. }
  776. dm_consume_args(as, hw_argc - 1);
  777. return 0;
  778. fail:
  779. kfree(m->hw_handler_name);
  780. m->hw_handler_name = NULL;
  781. return ret;
  782. }
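/*
 * Hardware handler examples (request-based only), matching the
 * "<#hw_handler args> [hw_handler [<arg>]*]" part of the table line; the
 * leading count covers the handler name plus its parameters:
 *
 *	0			no handler requested
 *	1 alua			attach scsi_dh_alua, no parameters
 *	3 <name> <p1> <p2>	hypothetical handler with two parameters,
 *				passed on via scsi_dh_set_params()
 */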
  783. static int parse_features(struct dm_arg_set *as, struct multipath *m)
  784. {
  785. int r;
  786. unsigned argc;
  787. struct dm_target *ti = m->ti;
  788. const char *arg_name;
  789. static const struct dm_arg _args[] = {
  790. {0, 8, "invalid number of feature args"},
  791. {1, 50, "pg_init_retries must be between 1 and 50"},
  792. {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
  793. };
  794. r = dm_read_arg_group(_args, as, &argc, &ti->error);
  795. if (r)
  796. return -EINVAL;
  797. if (!argc)
  798. return 0;
  799. do {
  800. arg_name = dm_shift_arg(as);
  801. argc--;
  802. if (!strcasecmp(arg_name, "queue_if_no_path")) {
  803. r = queue_if_no_path(m, true, false);
  804. continue;
  805. }
  806. if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
  807. set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
  808. continue;
  809. }
  810. if (!strcasecmp(arg_name, "pg_init_retries") &&
  811. (argc >= 1)) {
  812. r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
  813. argc--;
  814. continue;
  815. }
  816. if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
  817. (argc >= 1)) {
  818. r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
  819. argc--;
  820. continue;
  821. }
  822. if (!strcasecmp(arg_name, "queue_mode") &&
  823. (argc >= 1)) {
  824. const char *queue_mode_name = dm_shift_arg(as);
  825. if (!strcasecmp(queue_mode_name, "bio"))
  826. m->queue_mode = DM_TYPE_BIO_BASED;
  827. else if (!strcasecmp(queue_mode_name, "rq"))
  828. m->queue_mode = DM_TYPE_REQUEST_BASED;
  829. else if (!strcasecmp(queue_mode_name, "mq"))
  830. m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
  831. else {
  832. ti->error = "Unknown 'queue_mode' requested";
  833. r = -EINVAL;
  834. }
  835. argc--;
  836. continue;
  837. }
  838. ti->error = "Unrecognised multipath feature request";
  839. r = -EINVAL;
  840. } while (argc && !r);
  841. return r;
  842. }
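/*
 * Feature argument examples accepted by parse_features(); the leading count
 * is the total number of words that follow:
 *
 *	0						no features
 *	1 queue_if_no_path				queue I/O when no path is usable
 *	3 queue_if_no_path pg_init_retries 5		plus bounded pg_init retries
 *	2 queue_mode bio				bio-based multipath
 */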
  843. static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
  844. {
  845. /* target arguments */
  846. static const struct dm_arg _args[] = {
  847. {0, 1024, "invalid number of priority groups"},
  848. {0, 1024, "invalid initial priority group number"},
  849. };
  850. int r;
  851. struct multipath *m;
  852. struct dm_arg_set as;
  853. unsigned pg_count = 0;
  854. unsigned next_pg_num;
  855. as.argc = argc;
  856. as.argv = argv;
  857. m = alloc_multipath(ti);
  858. if (!m) {
  859. ti->error = "can't allocate multipath";
  860. return -EINVAL;
  861. }
  862. r = parse_features(&as, m);
  863. if (r)
  864. goto bad;
  865. r = alloc_multipath_stage2(ti, m);
  866. if (r)
  867. goto bad;
  868. r = parse_hw_handler(&as, m);
  869. if (r)
  870. goto bad;
  871. r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
  872. if (r)
  873. goto bad;
  874. r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
  875. if (r)
  876. goto bad;
  877. if ((!m->nr_priority_groups && next_pg_num) ||
  878. (m->nr_priority_groups && !next_pg_num)) {
  879. ti->error = "invalid initial priority group";
  880. r = -EINVAL;
  881. goto bad;
  882. }
  883. /* parse the priority groups */
  884. while (as.argc) {
  885. struct priority_group *pg;
  886. unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
  887. pg = parse_priority_group(&as, m);
  888. if (IS_ERR(pg)) {
  889. r = PTR_ERR(pg);
  890. goto bad;
  891. }
  892. nr_valid_paths += pg->nr_pgpaths;
  893. atomic_set(&m->nr_valid_paths, nr_valid_paths);
  894. list_add_tail(&pg->list, &m->priority_groups);
  895. pg_count++;
  896. pg->pg_num = pg_count;
  897. if (!--next_pg_num)
  898. m->next_pg = pg;
  899. }
  900. if (pg_count != m->nr_priority_groups) {
  901. ti->error = "priority group count mismatch";
  902. r = -EINVAL;
  903. goto bad;
  904. }
  905. ti->num_flush_bios = 1;
  906. ti->num_discard_bios = 1;
  907. ti->num_write_same_bios = 1;
  908. ti->num_write_zeroes_bios = 1;
  909. if (m->queue_mode == DM_TYPE_BIO_BASED)
  910. ti->per_io_data_size = multipath_per_bio_data_size();
  911. else
  912. ti->per_io_data_size = sizeof(struct dm_mpath_io);
  913. return 0;
  914. bad:
  915. free_multipath(m);
  916. return r;
  917. }
  918. static void multipath_wait_for_pg_init_completion(struct multipath *m)
  919. {
  920. DEFINE_WAIT(wait);
  921. while (1) {
  922. prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
  923. if (!atomic_read(&m->pg_init_in_progress))
  924. break;
  925. io_schedule();
  926. }
  927. finish_wait(&m->pg_init_wait, &wait);
  928. }
  929. static void flush_multipath_work(struct multipath *m)
  930. {
  931. set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
  932. smp_mb__after_atomic();
  933. flush_workqueue(kmpath_handlerd);
  934. multipath_wait_for_pg_init_completion(m);
  935. flush_workqueue(kmultipathd);
  936. flush_work(&m->trigger_event);
  937. clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
  938. smp_mb__after_atomic();
  939. }
  940. static void multipath_dtr(struct dm_target *ti)
  941. {
  942. struct multipath *m = ti->private;
  943. flush_multipath_work(m);
  944. free_multipath(m);
  945. }
  946. /*
  947. * Take a path out of use.
  948. */
  949. static int fail_path(struct pgpath *pgpath)
  950. {
  951. unsigned long flags;
  952. struct multipath *m = pgpath->pg->m;
  953. spin_lock_irqsave(&m->lock, flags);
  954. if (!pgpath->is_active)
  955. goto out;
  956. DMWARN("Failing path %s.", pgpath->path.dev->name);
  957. pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
  958. pgpath->is_active = false;
  959. pgpath->fail_count++;
  960. atomic_dec(&m->nr_valid_paths);
  961. if (pgpath == m->current_pgpath)
  962. m->current_pgpath = NULL;
  963. dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
  964. pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
  965. schedule_work(&m->trigger_event);
  966. out:
  967. spin_unlock_irqrestore(&m->lock, flags);
  968. return 0;
  969. }
  970. /*
  971. * Reinstate a previously-failed path
  972. */
  973. static int reinstate_path(struct pgpath *pgpath)
  974. {
  975. int r = 0, run_queue = 0;
  976. unsigned long flags;
  977. struct multipath *m = pgpath->pg->m;
  978. unsigned nr_valid_paths;
  979. spin_lock_irqsave(&m->lock, flags);
  980. if (pgpath->is_active)
  981. goto out;
  982. DMWARN("Reinstating path %s.", pgpath->path.dev->name);
  983. r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
  984. if (r)
  985. goto out;
  986. pgpath->is_active = true;
  987. nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
  988. if (nr_valid_paths == 1) {
  989. m->current_pgpath = NULL;
  990. run_queue = 1;
  991. } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
  992. if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
  993. atomic_inc(&m->pg_init_in_progress);
  994. }
  995. dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
  996. pgpath->path.dev->name, nr_valid_paths);
  997. schedule_work(&m->trigger_event);
  998. out:
  999. spin_unlock_irqrestore(&m->lock, flags);
  1000. if (run_queue) {
  1001. dm_table_run_md_queue_async(m->ti->table);
  1002. process_queued_io_list(m);
  1003. }
  1004. return r;
  1005. }
  1006. /*
  1007. * Fail or reinstate all paths that match the provided struct dm_dev.
  1008. */
  1009. static int action_dev(struct multipath *m, struct dm_dev *dev,
  1010. action_fn action)
  1011. {
  1012. int r = -EINVAL;
  1013. struct pgpath *pgpath;
  1014. struct priority_group *pg;
  1015. list_for_each_entry(pg, &m->priority_groups, list) {
  1016. list_for_each_entry(pgpath, &pg->pgpaths, list) {
  1017. if (pgpath->path.dev == dev)
  1018. r = action(pgpath);
  1019. }
  1020. }
  1021. return r;
  1022. }
  1023. /*
  1024. * Temporarily try to avoid having to use the specified PG
  1025. */
  1026. static void bypass_pg(struct multipath *m, struct priority_group *pg,
  1027. bool bypassed)
  1028. {
  1029. unsigned long flags;
  1030. spin_lock_irqsave(&m->lock, flags);
  1031. pg->bypassed = bypassed;
  1032. m->current_pgpath = NULL;
  1033. m->current_pg = NULL;
  1034. spin_unlock_irqrestore(&m->lock, flags);
  1035. schedule_work(&m->trigger_event);
  1036. }
  1037. /*
  1038. * Switch to using the specified PG from the next I/O that gets mapped
  1039. */
  1040. static int switch_pg_num(struct multipath *m, const char *pgstr)
  1041. {
  1042. struct priority_group *pg;
  1043. unsigned pgnum;
  1044. unsigned long flags;
  1045. char dummy;
  1046. if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
  1047. !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
  1048. DMWARN("invalid PG number supplied to switch_pg_num");
  1049. return -EINVAL;
  1050. }
  1051. spin_lock_irqsave(&m->lock, flags);
  1052. list_for_each_entry(pg, &m->priority_groups, list) {
  1053. pg->bypassed = false;
  1054. if (--pgnum)
  1055. continue;
  1056. m->current_pgpath = NULL;
  1057. m->current_pg = NULL;
  1058. m->next_pg = pg;
  1059. }
  1060. spin_unlock_irqrestore(&m->lock, flags);
  1061. schedule_work(&m->trigger_event);
  1062. return 0;
  1063. }
  1064. /*
  1065. * Set/clear bypassed status of a PG.
  1066. * PGs are numbered upwards from 1 in the order they were declared.
  1067. */
  1068. static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
  1069. {
  1070. struct priority_group *pg;
  1071. unsigned pgnum;
  1072. char dummy;
  1073. if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
  1074. !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
  1075. DMWARN("invalid PG number supplied to bypass_pg");
  1076. return -EINVAL;
  1077. }
  1078. list_for_each_entry(pg, &m->priority_groups, list) {
  1079. if (!--pgnum)
  1080. break;
  1081. }
  1082. bypass_pg(m, pg, bypassed);
  1083. return 0;
  1084. }
  1085. /*
  1086. * Should we retry pg_init immediately?
  1087. */
  1088. static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
  1089. {
  1090. unsigned long flags;
  1091. bool limit_reached = false;
  1092. spin_lock_irqsave(&m->lock, flags);
  1093. if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
  1094. !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
  1095. set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
  1096. else
  1097. limit_reached = true;
  1098. spin_unlock_irqrestore(&m->lock, flags);
  1099. return limit_reached;
  1100. }
  1101. static void pg_init_done(void *data, int errors)
  1102. {
  1103. struct pgpath *pgpath = data;
  1104. struct priority_group *pg = pgpath->pg;
  1105. struct multipath *m = pg->m;
  1106. unsigned long flags;
  1107. bool delay_retry = false;
  1108. /* device or driver problems */
  1109. switch (errors) {
  1110. case SCSI_DH_OK:
  1111. break;
  1112. case SCSI_DH_NOSYS:
  1113. if (!m->hw_handler_name) {
  1114. errors = 0;
  1115. break;
  1116. }
  1117. DMERR("Could not failover the device: Handler scsi_dh_%s "
  1118. "Error %d.", m->hw_handler_name, errors);
  1119. /*
  1120. * Fail path for now, so we do not ping pong
  1121. */
  1122. fail_path(pgpath);
  1123. break;
  1124. case SCSI_DH_DEV_TEMP_BUSY:
  1125. /*
  1126. * Probably doing something like FW upgrade on the
  1127. * controller so try the other pg.
  1128. */
  1129. bypass_pg(m, pg, true);
  1130. break;
  1131. case SCSI_DH_RETRY:
  1132. /* Wait before retrying. */
  1133. delay_retry = 1;
  1134. /* fall through */
  1135. case SCSI_DH_IMM_RETRY:
  1136. case SCSI_DH_RES_TEMP_UNAVAIL:
  1137. if (pg_init_limit_reached(m, pgpath))
  1138. fail_path(pgpath);
  1139. errors = 0;
  1140. break;
  1141. case SCSI_DH_DEV_OFFLINED:
  1142. default:
  1143. /*
  1144. * We probably do not want to fail the path for a device
  1145. * error, but this is what the old dm did. In future
  1146. * patches we can do more advanced handling.
  1147. */
  1148. fail_path(pgpath);
  1149. }
  1150. spin_lock_irqsave(&m->lock, flags);
  1151. if (errors) {
  1152. if (pgpath == m->current_pgpath) {
  1153. DMERR("Could not failover device. Error %d.", errors);
  1154. m->current_pgpath = NULL;
  1155. m->current_pg = NULL;
  1156. }
  1157. } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
  1158. pg->bypassed = false;
  1159. if (atomic_dec_return(&m->pg_init_in_progress) > 0)
  1160. /* Activations of other paths are still ongoing */
  1161. goto out;
  1162. if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
  1163. if (delay_retry)
  1164. set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
  1165. else
  1166. clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
  1167. if (__pg_init_all_paths(m))
  1168. goto out;
  1169. }
  1170. clear_bit(MPATHF_QUEUE_IO, &m->flags);
  1171. process_queued_io_list(m);
  1172. /*
  1173. * Wake up any thread waiting to suspend.
  1174. */
  1175. wake_up(&m->pg_init_wait);
  1176. out:
  1177. spin_unlock_irqrestore(&m->lock, flags);
  1178. }
  1179. static void activate_or_offline_path(struct pgpath *pgpath)
  1180. {
  1181. struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
  1182. if (pgpath->is_active && !blk_queue_dying(q))
  1183. scsi_dh_activate(q, pg_init_done, pgpath);
  1184. else
  1185. pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
  1186. }
  1187. static void activate_path_work(struct work_struct *work)
  1188. {
  1189. struct pgpath *pgpath =
  1190. container_of(work, struct pgpath, activate_path.work);
  1191. activate_or_offline_path(pgpath);
  1192. }
  1193. static int noretry_error(blk_status_t error)
  1194. {
  1195. switch (error) {
  1196. case BLK_STS_NOTSUPP:
  1197. case BLK_STS_NOSPC:
  1198. case BLK_STS_TARGET:
  1199. case BLK_STS_NEXUS:
  1200. case BLK_STS_MEDIUM:
  1201. return 1;
  1202. }
  1203. /* Anything else could be a path failure, so should be retried */
  1204. return 0;
  1205. }
  1206. static int multipath_end_io(struct dm_target *ti, struct request *clone,
  1207. blk_status_t error, union map_info *map_context)
  1208. {
  1209. struct dm_mpath_io *mpio = get_mpio(map_context);
  1210. struct pgpath *pgpath = mpio->pgpath;
  1211. int r = DM_ENDIO_DONE;
  1212. /*
  1213. * We don't queue any clone request inside the multipath target
  1214. * during end I/O handling, since those clone requests don't have
  1215. * bio clones. If we queue them inside the multipath target,
  1216. * we need to make bio clones, that requires memory allocation.
  1217. * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
  1218. * don't have bio clones.)
  1219. * Instead of queueing the clone request here, we queue the original
  1220. * request into dm core, which will remake a clone request and
  1221. * clone bios for it and resubmit it later.
  1222. */
  1223. if (error && !noretry_error(error)) {
  1224. struct multipath *m = ti->private;
  1225. r = DM_ENDIO_REQUEUE;
  1226. if (pgpath)
  1227. fail_path(pgpath);
  1228. if (atomic_read(&m->nr_valid_paths) == 0 &&
  1229. !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
  1230. if (error == BLK_STS_IOERR)
  1231. dm_report_EIO(m);
  1232. /* complete with the original error */
  1233. r = DM_ENDIO_DONE;
  1234. }
  1235. }
  1236. if (pgpath) {
  1237. struct path_selector *ps = &pgpath->pg->ps;
  1238. if (ps->type->end_io)
  1239. ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
  1240. }
  1241. return r;
  1242. }
  1243. static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
  1244. blk_status_t *error)
  1245. {
  1246. struct multipath *m = ti->private;
  1247. struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
  1248. struct pgpath *pgpath = mpio->pgpath;
  1249. unsigned long flags;
  1250. int r = DM_ENDIO_DONE;
  1251. if (!*error || noretry_error(*error))
  1252. goto done;
  1253. if (pgpath)
  1254. fail_path(pgpath);
  1255. if (atomic_read(&m->nr_valid_paths) == 0 &&
  1256. !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
  1257. dm_report_EIO(m);
  1258. *error = BLK_STS_IOERR;
  1259. goto done;
  1260. }
  1261. /* Queue for the daemon to resubmit */
  1262. dm_bio_restore(get_bio_details_from_bio(clone), clone);
  1263. spin_lock_irqsave(&m->lock, flags);
  1264. bio_list_add(&m->queued_bios, clone);
  1265. spin_unlock_irqrestore(&m->lock, flags);
  1266. if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
  1267. queue_work(kmultipathd, &m->process_queued_bios);
  1268. r = DM_ENDIO_INCOMPLETE;
  1269. done:
  1270. if (pgpath) {
  1271. struct path_selector *ps = &pgpath->pg->ps;
  1272. if (ps->type->end_io)
  1273. ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
  1274. }
  1275. return r;
  1276. }
  1277. /*
  1278. * Suspend can't complete until all the I/O is processed so if
  1279. * the last path fails we must error any remaining I/O.
  1280. * Note that if the freeze_bdev fails while suspending, the
  1281. * queue_if_no_path state is lost - userspace should reset it.
  1282. */
  1283. static void multipath_presuspend(struct dm_target *ti)
  1284. {
  1285. struct multipath *m = ti->private;
  1286. queue_if_no_path(m, false, true);
  1287. }
  1288. static void multipath_postsuspend(struct dm_target *ti)
  1289. {
  1290. struct multipath *m = ti->private;
  1291. mutex_lock(&m->work_mutex);
  1292. flush_multipath_work(m);
  1293. mutex_unlock(&m->work_mutex);
  1294. }
  1295. /*
  1296. * Restore the queue_if_no_path setting.
  1297. */
  1298. static void multipath_resume(struct dm_target *ti)
  1299. {
  1300. struct multipath *m = ti->private;
  1301. unsigned long flags;
  1302. spin_lock_irqsave(&m->lock, flags);
  1303. assign_bit(test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
  1304. MPATHF_QUEUE_IF_NO_PATH, &m->flags);
  1305. spin_unlock_irqrestore(&m->lock, flags);
  1306. }
  1307. /*
  1308. * Info output has the following format:
  1309. * num_multipath_feature_args [multipath_feature_args]*
  1310. * num_handler_status_args [handler_status_args]*
  1311. * num_groups init_group_number
  1312. * [A|D|E num_ps_status_args [ps_status_args]*
  1313. * num_paths num_selector_args
  1314. * [path_dev A|F fail_count [selector_args]* ]+ ]+
  1315. *
  1316. * Table output has the following format (identical to the constructor string):
  1317. * num_feature_args [features_args]*
  1318. * num_handler_args hw_handler [hw_handler_args]*
  1319. * num_groups init_group_number
  1320. * [priority selector-name num_ps_args [ps_args]*
  1321. * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
  1322. */
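/*
 * Hypothetical example of both forms for one round-robin group with two
 * healthy paths (the selector-specific fields depend on the path selector
 * in use):
 *
 *	info:  2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 *	table: 0 0 1 1 round-robin 0 2 1 8:16 100 8:32 100
 */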
  1323. static void multipath_status(struct dm_target *ti, status_type_t type,
  1324. unsigned status_flags, char *result, unsigned maxlen)
  1325. {
  1326. int sz = 0;
  1327. unsigned long flags;
  1328. struct multipath *m = ti->private;
  1329. struct priority_group *pg;
  1330. struct pgpath *p;
  1331. unsigned pg_num;
  1332. char state;
  1333. spin_lock_irqsave(&m->lock, flags);
  1334. /* Features */
  1335. if (type == STATUSTYPE_INFO)
  1336. DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
  1337. atomic_read(&m->pg_init_count));
  1338. else {
  1339. DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
  1340. (m->pg_init_retries > 0) * 2 +
  1341. (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
  1342. test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
  1343. (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
  1344. if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
  1345. DMEMIT("queue_if_no_path ");
  1346. if (m->pg_init_retries)
  1347. DMEMIT("pg_init_retries %u ", m->pg_init_retries);
  1348. if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
  1349. DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
  1350. if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
  1351. DMEMIT("retain_attached_hw_handler ");
  1352. if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
  1353. switch(m->queue_mode) {
  1354. case DM_TYPE_BIO_BASED:
  1355. DMEMIT("queue_mode bio ");
  1356. break;
  1357. case DM_TYPE_MQ_REQUEST_BASED:
  1358. DMEMIT("queue_mode mq ");
  1359. break;
  1360. default:
  1361. WARN_ON_ONCE(true);
  1362. break;
  1363. }
  1364. }
  1365. }
  1366. if (!m->hw_handler_name || type == STATUSTYPE_INFO)
  1367. DMEMIT("0 ");
  1368. else
  1369. DMEMIT("1 %s ", m->hw_handler_name);
  1370. DMEMIT("%u ", m->nr_priority_groups);
  1371. if (m->next_pg)
  1372. pg_num = m->next_pg->pg_num;
  1373. else if (m->current_pg)
  1374. pg_num = m->current_pg->pg_num;
  1375. else
  1376. pg_num = (m->nr_priority_groups ? 1 : 0);
  1377. DMEMIT("%u ", pg_num);
  1378. switch (type) {
  1379. case STATUSTYPE_INFO:
  1380. list_for_each_entry(pg, &m->priority_groups, list) {
  1381. if (pg->bypassed)
  1382. state = 'D'; /* Disabled */
  1383. else if (pg == m->current_pg)
  1384. state = 'A'; /* Currently Active */
  1385. else
  1386. state = 'E'; /* Enabled */
  1387. DMEMIT("%c ", state);
  1388. if (pg->ps.type->status)
  1389. sz += pg->ps.type->status(&pg->ps, NULL, type,
  1390. result + sz,
  1391. maxlen - sz);
  1392. else
  1393. DMEMIT("0 ");
  1394. DMEMIT("%u %u ", pg->nr_pgpaths,
  1395. pg->ps.type->info_args);
  1396. list_for_each_entry(p, &pg->pgpaths, list) {
  1397. DMEMIT("%s %s %u ", p->path.dev->name,
  1398. p->is_active ? "A" : "F",
  1399. p->fail_count);
  1400. if (pg->ps.type->status)
  1401. sz += pg->ps.type->status(&pg->ps,
  1402. &p->path, type, result + sz,
  1403. maxlen - sz);
  1404. }
  1405. }
  1406. break;
  1407. case STATUSTYPE_TABLE:
  1408. list_for_each_entry(pg, &m->priority_groups, list) {
  1409. DMEMIT("%s ", pg->ps.type->name);
  1410. if (pg->ps.type->status)
  1411. sz += pg->ps.type->status(&pg->ps, NULL, type,
  1412. result + sz,
  1413. maxlen - sz);
  1414. else
  1415. DMEMIT("0 ");
  1416. DMEMIT("%u %u ", pg->nr_pgpaths,
  1417. pg->ps.type->table_args);
  1418. list_for_each_entry(p, &pg->pgpaths, list) {
  1419. DMEMIT("%s ", p->path.dev->name);
  1420. if (pg->ps.type->status)
  1421. sz += pg->ps.type->status(&pg->ps,
  1422. &p->path, type, result + sz,
  1423. maxlen - sz);
  1424. }
  1425. }
  1426. break;
  1427. }
  1428. spin_unlock_irqrestore(&m->lock, flags);
  1429. }

static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, false, false);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], true);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], false);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s", argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}
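
/*
 * Illustrative usage (device name "mpath0" and path "8:32" are
 * hypothetical), via dmsetup:
 *
 *   dmsetup message mpath0 0 "queue_if_no_path"
 *   dmsetup message mpath0 0 "fail_path 8:32"
 *   dmsetup message mpath0 0 "switch_group 2"
 *
 * A minimal C sketch of the same, assuming libdevmapper is available
 * (link with -ldevmapper); error handling elided:
 *
 *   struct dm_task *dmt = dm_task_create(DM_DEVICE_TARGET_MSG);
 *   dm_task_set_name(dmt, "mpath0");
 *   dm_task_set_sector(dmt, 0);
 *   dm_task_set_message(dmt, "fail_path 8:32");
 *   dm_task_run(dmt);
 *   dm_task_destroy(dmt);
 */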

static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev, fmode_t *mode)
{
	struct multipath *m = ti->private;
	struct pgpath *current_pgpath;
	int r;

	current_pgpath = lockless_dereference(m->current_pgpath);
	if (!current_pgpath)
		current_pgpath = choose_pgpath(m, 0);

	if (current_pgpath) {
		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
			*bdev = current_pgpath->path.dev->bdev;
			*mode = current_pgpath->path.dev->mode;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		else
			r = -EIO;
	}

	if (r == -ENOTCONN) {
		if (!lockless_dereference(m->current_pg)) {
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
		}
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return r;
}
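
/*
 * Informational note on the return values above: 0 means *bdev/*mode name a
 * usable path and the ioctl may be passed through; a positive return tells
 * dm core the target and path device sizes differ, so passthrough is only
 * allowed with extra privilege; -ENOTCONN asks the caller to retry once
 * path initialization settles.
 */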

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
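
/*
 * Informational: reporting every path here lets dm core validate all the
 * underlying devices and stack their queue limits, not just those of the
 * currently active path.
 */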

static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy". Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg, *next_pg;
	struct pgpath *pgpath;

	/* pg_init in progress */
	if (atomic_read(&m->pg_init_in_progress))
		return true;

	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
	if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);

	/* Guess which priority_group will be used at next mapping time */
	pg = lockless_dereference(m->current_pg);
	next_pg = lockless_dereference(m->next_pg);
	if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
		pg = next_pg;

	if (!pg) {
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call choose_pgpath() here to avoid triggering
		 * pg_init just by busy checking.
		 * So we don't know whether the underlying devices we will
		 * be using at next mapping time are busy or not. Just try
		 * mapping.
		 */
		return busy;
	}

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it, so we consider such a pg not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}
	}

	if (!has_active) {
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine that.
		 */
		busy = false;
	}

	return busy;
}
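
/*
 * Summary of the decision above (informational):
 *   pg_init in progress                    -> busy
 *   no valid paths + queue_if_no_path      -> busy (except on blk-mq)
 *   no pg to guess at                      -> not busy (just try mapping)
 *   guessed pg: all active paths busy      -> busy
 *   guessed pg: idle active path, or none  -> not busy
 */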

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 12, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.map = multipath_map_bio,
	.end_io = multipath_end_io_bio,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};
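
/*
 * Informational: both the request-based hooks (clone_and_map_rq/rq_end_io)
 * and the bio-based hooks (map/end_io) are wired up here; which set dm core
 * invokes depends on the queue_mode chosen when the table is constructed
 * (cf. the queue_mode handling in multipath_status() above).
 */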

static int __init dm_multipath_init(void)
{
	int r;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("request-based register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading the existing workqueue. Overloading it
	 * would also create a bottleneck in the path of storage
	 * hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	return 0;

bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	dm_unregister_target(&multipath_target);
bad_register_target:
	return r;
}
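
/*
 * Informational: the error labels unwind in exact reverse order of setup,
 * mirroring dm_multipath_exit() below. Once loaded, the target should show
 * up in the target list, e.g. (illustrative):
 *
 *   # dmsetup targets
 *   multipath        v1.12.0
 */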

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);
	dm_unregister_target(&multipath_target);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
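
/*
 * Illustrative table line for this target (sector count, device numbers
 * and the round-robin selector are hypothetical): one priority group, two
 * paths, no features and no hardware handler:
 *
 *   dmsetup create mpath0 --table \
 *     "0 2097152 multipath 0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000"
 *
 * i.e. <#features=0> <#hw handler args=0> <#pgs=1> <next pg=1>
 *      <selector=round-robin> <#selector args=0> <#paths=2>
 *      <#path args=1> <dev> <repeat_count> ...
 */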