dm-mpath.c

/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-rq.h"
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
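
/*
 * Note: DM_PG_INIT_DELAY_DEFAULT is a sentinel meaning "no user-supplied
 * delay"; when a delayed pg_init retry is needed and the sentinel is still
 * set, DM_PG_INIT_DELAY_MSECS is used instead (see __pg_init_all_paths()).
 */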

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	unsigned long flags;		/* Multipath state flags */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	enum dm_queue_mode queue_mode;

	struct mutex work_mutex;
	struct work_struct trigger_event;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;
};
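
/*
 * Locking note (summarising the usage below): m->lock protects
 * current_pgpath, current_pg, next_pg and queued_bios; the atomic_t
 * counters need no extra locking. Fast paths read the current path/PG
 * pointers with lockless_dereference() and only take m->lock when they
 * have to change them.
 */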

/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);

/*-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------*/

#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
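
/*
 * The MPATHF_* values above are bit numbers into m->flags; they are
 * manipulated with the atomic set_bit()/clear_bit()/test_bit() family,
 * so individual flags can be inspected without taking m->lock.
 */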

/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = true;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
		atomic_set(&m->nr_valid_paths, 0);
		atomic_set(&m->pg_init_in_progress, 0);
		atomic_set(&m->pg_init_count, 0);
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);

		m->queue_mode = DM_TYPE_NONE;

		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_NONE) {
		/*
		 * Default to request-based.
		 */
		if (dm_use_blk_mq(dm_table_get_md(ti->table)))
			m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
		else
			m->queue_mode = DM_TYPE_REQUEST_BASED;
	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
		INIT_WORK(&m->process_queued_bios, process_queued_bios);
		/*
		 * bio-based doesn't support any direct scsi_dh management;
		 * it just discovers if a scsi_dh is attached.
		 */
		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
	}

	dm_table_set_type(ti->table, m->queue_mode);

	return 0;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static size_t multipath_per_bio_data_size(void)
{
	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_per_bio_data(bio, multipath_per_bio_data_size());
}

static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
{
	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	void *bio_details = mpio + 1;

	return bio_details;
}
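
/*
 * Per-bio-data layout for bio-based multipath, as sized by
 * multipath_per_bio_data_size():
 *
 *	[ struct dm_mpath_io | struct dm_bio_details ]
 *
 * hence the "mpio + 1" arithmetic above.
 */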

static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
					struct dm_bio_details **bio_details_p)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);

	memset(mpio, 0, sizeof(*mpio));
	memset(bio_details, 0, sizeof(*bio_details));
	dm_bio_record(bio_details, bio);

	if (mpio_p)
		*mpio_p = mpio;
	if (bio_details_p)
		*bio_details_p = bio_details;
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	lockdep_assert_held(&m->lock);

	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		return 0;

	atomic_inc(&m->pg_init_count);
	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			atomic_inc(&m->pg_init_in_progress);
	}
	return atomic_read(&m->pg_init_in_progress);
}

static int pg_init_all_paths(struct multipath *m)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	ret = __pg_init_all_paths(m);
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
	m->current_pg = pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
	} else {
		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
	}

	atomic_set(&m->pg_init_count, 0);
}

static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{
	unsigned long flags;
	struct dm_path *path;
	struct pgpath *pgpath;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return ERR_PTR(-ENXIO);

	pgpath = path_to_pgpath(path);

	if (unlikely(lockless_dereference(m->current_pg) != pg)) {
		/* Only update current_pgpath if pg changed */
		spin_lock_irqsave(&m->lock, flags);
		m->current_pgpath = pgpath;
		__switch_pg(m, pg);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return pgpath;
}

static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	unsigned long flags;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned bypassed = 1;

	if (!atomic_read(&m->nr_valid_paths)) {
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (lockless_dereference(m->next_pg)) {
		spin_lock_irqsave(&m->lock, flags);
		pg = m->next_pg;
		if (!pg) {
			spin_unlock_irqrestore(&m->lock, flags);
			goto check_current_pg;
		}
		m->next_pg = NULL;
		spin_unlock_irqrestore(&m->lock, flags);
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* Don't change PG until it has no remaining paths */
check_current_pg:
	pg = lockless_dereference(m->current_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == !!bypassed)
				continue;
			pgpath = choose_path_in_pg(m, pg, nr_bytes);
			if (!IS_ERR_OR_NULL(pgpath)) {
				if (!bypassed)
					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
				return pgpath;
			}
		}
	} while (bypassed--);

failed:
	spin_lock_irqsave(&m->lock, flags);
	m->current_pgpath = NULL;
	m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);

	return NULL;
}

/*
 * dm_report_EIO() is a macro instead of a function to make pr_debug()
 * report the function name and line number of the function from which
 * it has been invoked.
 */
#define dm_report_EIO(m)						\
do {									\
	struct mapped_device *md = dm_table_get_md((m)->ti->table);	\
									\
	pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
		 dm_device_name(md),					\
		 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags),	\
		 dm_noflush_suspending((m)->ti));			\
} while (0)

/*
 * Map cloned requests (request-based multipath)
 */
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **__clone)
{
	struct multipath *m = ti->private;
	size_t nr_bytes = blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct request_queue *q;
	struct request *clone;

	/* Do we need to select a new pgpath? */
	pgpath = lockless_dereference(m->current_pgpath);
	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath) {
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			return DM_MAPIO_DELAY_REQUEUE;
		dm_report_EIO(m);	/* Failed */
		return DM_MAPIO_KILL;
	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		if (pg_init_all_paths(m))
			return DM_MAPIO_DELAY_REQUEUE;
		return DM_MAPIO_REQUEUE;
	}

	memset(mpio, 0, sizeof(*mpio));
	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;
	q = bdev_get_queue(bdev);
	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
	if (IS_ERR(clone)) {
		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
		bool queue_dying = blk_queue_dying(q);

		DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing",
			    PTR_ERR(clone), queue_dying ? " (path offline)" : "");
		if (queue_dying) {
			atomic_inc(&m->pg_init_in_progress);
			activate_or_offline_path(pgpath);
			return DM_MAPIO_REQUEUE;
		}
		return DM_MAPIO_DELAY_REQUEUE;
	}
	clone->bio = clone->biotail = NULL;
	clone->rq_disk = bdev->bd_disk;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	*__clone = clone;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static void multipath_release_clone(struct request *clone)
{
	blk_put_request(clone);
}

/*
 * Map cloned bios (bio-based multipath)
 */
static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
{
	size_t nr_bytes = bio->bi_iter.bi_size;
	struct pgpath *pgpath;
	unsigned long flags;
	bool queue_io;

	/* Do we need to select a new pgpath? */
	pgpath = lockless_dereference(m->current_pgpath);
	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
	if (!pgpath || !queue_io)
		pgpath = choose_pgpath(m, nr_bytes);

	if ((pgpath && queue_io) ||
	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
		/* Queue for the daemon to resubmit */
		spin_lock_irqsave(&m->lock, flags);
		bio_list_add(&m->queued_bios, bio);
		spin_unlock_irqrestore(&m->lock, flags);
		/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
		if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		else if (!queue_io)
			queue_work(kmultipathd, &m->process_queued_bios);
		return DM_MAPIO_SUBMITTED;
	}

	if (!pgpath) {
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			return DM_MAPIO_REQUEUE;
		dm_report_EIO(m);
		return -EIO;
	}

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bio->bi_error = 0;
	bio->bi_bdev = pgpath->path.dev->bdev;
	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = NULL;

	multipath_init_per_bio_data(bio, &mpio, NULL);

	return __multipath_map_bio(m, bio, mpio);
}

static void process_queued_io_list(struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
	else if (m->queue_mode == DM_TYPE_BIO_BASED)
		queue_work(kmultipathd, &m->process_queued_bios);
}

static void process_queued_bios(struct work_struct *work)
{
	int r;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	struct multipath *m =
		container_of(work, struct multipath, process_queued_bios);

	bio_list_init(&bios);

	spin_lock_irqsave(&m->lock, flags);

	if (bio_list_empty(&m->queued_bios)) {
		spin_unlock_irqrestore(&m->lock, flags);
		return;
	}

	bio_list_merge(&bios, &m->queued_bios);
	bio_list_init(&m->queued_bios);

	spin_unlock_irqrestore(&m->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
		if (r < 0 || r == DM_MAPIO_REQUEUE) {
			bio->bi_error = r;
			bio_endio(bio);
		} else if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
	}
	blk_finish_plug(&plug);
}

static void assign_bit(bool value, long nr, unsigned long *addr)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}
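
/*
 * Note: this open-coded helper predates the generic assign_bit() that
 * later kernels provide in <linux/bitops.h> (with a different argument
 * order: nr, addr, value).
 */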

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
			    bool save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	assign_bit((save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
		   (!save_old_value && queue_if_no_path),
		   MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	assign_bit(queue_if_no_path || dm_noflush_suspending(m->ti),
		   MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	spin_unlock_irqrestore(&m->lock, flags);

	if (!queue_if_no_path) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	return 0;
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
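
/*
 * Illustrative table line (not from this file): two paths in a single
 * priority group using the round-robin selector, with no feature or
 * hardware-handler arguments:
 *
 *	0 409600 multipath 0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 */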

static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q = NULL;
	const char *attached_handler_name;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
		q = bdev_get_queue(p->path.dev->bdev);

	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			/*
			 * Clear any hw_handler_params associated with a
			 * handler that isn't already attached.
			 */
			if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
				kfree(m->hw_handler_params);
				m->hw_handler_params = NULL;
			}

			/*
			 * Reset hw_handler_name to match the attached handler
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = attached_handler_name;
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			char b[BDEVNAME_SIZE];

			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
			       bdevname(p->path.dev->bdev, b));
			goto retain;
		}
		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware handler parameters";
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (m->queue_mode == DM_TYPE_BIO_BASED) {
		dm_consume_args(as, hw_argc);
		DMERR("bio-based multipath doesn't allow hardware handler args");
		return 0;
	}

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!m->hw_handler_name)
		return -EINVAL;

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 8, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "queue_mode") &&
		    (argc >= 1)) {
			const char *queue_mode_name = dm_shift_arg(as);

			if (!strcasecmp(queue_mode_name, "bio"))
				m->queue_mode = DM_TYPE_BIO_BASED;
			else if (!strcasecmp(queue_mode_name, "rq"))
				m->queue_mode = DM_TYPE_REQUEST_BASED;
			else if (!strcasecmp(queue_mode_name, "mq"))
				m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
			else {
				ti->error = "Unknown 'queue_mode' requested";
				r = -EINVAL;
			}
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* target arguments */
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = alloc_multipath_stage2(ti, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;
		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		nr_valid_paths += pg->nr_pgpaths;
		atomic_set(&m->nr_valid_paths, nr_valid_paths);

		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	ti->num_write_zeroes_bios = 1;
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		ti->per_io_data_size = multipath_per_bio_data_size();
	else
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

 bad:
	free_multipath(m);
	return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&m->pg_init_in_progress))
			break;

		io_schedule();
	}
	finish_wait(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
	set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
	smp_mb__after_atomic();

	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work(&m->trigger_event);

	clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
	smp_mb__after_atomic();
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = false;
	pgpath->fail_count++;

	atomic_dec(&m->nr_valid_paths);

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0, run_queue = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;
	unsigned nr_valid_paths;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	DMWARN("Reinstating path %s.", pgpath->path.dev->name);

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = true;

	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
	if (nr_valid_paths == 1) {
		m->current_pgpath = NULL;
		run_queue = 1;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			atomic_inc(&m->pg_init_in_progress);
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (run_queue) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      bool bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = false;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	bool limit_reached = false;

	spin_lock_irqsave(&m->lock, flags);

	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
	else
		limit_reached = true;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	bool delay_retry = false;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, true);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = true;
		/* fall through */
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	case SCSI_DH_DEV_OFFLINED:
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
		pg->bypassed = false;

	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
		/* Activations of other paths are still ongoing */
		goto out;

	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		if (delay_retry)
			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
		else
			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);

		if (__pg_init_all_paths(m))
			goto out;
	}
	clear_bit(MPATHF_QUEUE_IO, &m->flags);

	process_queued_io_list(m);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

static void activate_or_offline_path(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	if (pgpath->is_active && !blk_queue_dying(q))
		scsi_dh_activate(q, pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static void activate_path_work(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	activate_or_offline_path(pgpath);
}

static int noretry_error(int error)
{
	switch (error) {
	case -EBADE:
		/*
		 * EBADE signals a reservation conflict.
		 * We shouldn't fail the path here as we can communicate with
		 * the target. We should failover to the next path, but in
		 * doing so we might be causing a ping-pong between paths.
		 * So just return the reservation conflict error.
		 */
	case -EOPNOTSUPP:
	case -EREMOTEIO:
	case -EILSEQ:
	case -ENODATA:
	case -ENOSPC:
		return 1;
	}

	/* Anything else could be a path failure, so should be retried */
	return 0;
}
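
/*
 * Any error not listed in noretry_error() is treated as a potential path
 * failure: the end_io handlers below fail the path and requeue the I/O so
 * it can be retried on another path.
 */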

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath = mpio->pgpath;
	int r = DM_ENDIO_DONE;

	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	if (error && !noretry_error(error)) {
		struct multipath *m = ti->private;

		r = DM_ENDIO_REQUEUE;

		if (pgpath)
			fail_path(pgpath);

		if (atomic_read(&m->nr_valid_paths) == 0 &&
		    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			if (error == -EIO)
				dm_report_EIO(m);
			/* complete with the original error */
			r = DM_ENDIO_DONE;
		}
	}

	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}

	return r;
}

static int do_end_io_bio(struct multipath *m, struct bio *clone,
			 int error, struct dm_mpath_io *mpio)
{
	unsigned long flags;

	if (!error)
		return 0;	/* I/O complete */

	if (noretry_error(error))
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	if (atomic_read(&m->nr_valid_paths) == 0 &&
	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
		dm_report_EIO(m);
		return -EIO;
	}

	/* Queue for the daemon to resubmit */
	dm_bio_restore(get_bio_details_from_bio(clone), clone);

	spin_lock_irqsave(&m->lock, flags);
	bio_list_add(&m->queued_bios, clone);
	spin_unlock_irqrestore(&m->lock, flags);
	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
		queue_work(kmultipathd, &m->process_queued_bios);

	return DM_ENDIO_INCOMPLETE;
}

static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
	struct pgpath *pgpath;
	struct path_selector *ps;
	int r;

	BUG_ON(!mpio);

	r = do_end_io_bio(m, clone, error, mpio);
	pgpath = mpio->pgpath;
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	queue_if_no_path(m, false, true);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	assign_bit(test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
		   MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
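
/*
 * These correspond to the STATUSTYPE_INFO and STATUSTYPE_TABLE output of
 * multipath_status() below, as reported by, e.g., "dmsetup status <dev>"
 * and "dmsetup table <dev>" respectively.
 */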
  1328. static void multipath_status(struct dm_target *ti, status_type_t type,
  1329. unsigned status_flags, char *result, unsigned maxlen)
  1330. {
  1331. int sz = 0;
  1332. unsigned long flags;
  1333. struct multipath *m = ti->private;
  1334. struct priority_group *pg;
  1335. struct pgpath *p;
  1336. unsigned pg_num;
  1337. char state;
  1338. spin_lock_irqsave(&m->lock, flags);
  1339. /* Features */
  1340. if (type == STATUSTYPE_INFO)
  1341. DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
  1342. atomic_read(&m->pg_init_count));
  1343. else {
  1344. DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
  1345. (m->pg_init_retries > 0) * 2 +
  1346. (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
  1347. test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
  1348. (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
  1349. if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
  1350. DMEMIT("queue_if_no_path ");
  1351. if (m->pg_init_retries)
  1352. DMEMIT("pg_init_retries %u ", m->pg_init_retries);
  1353. if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
  1354. DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
  1355. if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
  1356. DMEMIT("retain_attached_hw_handler ");
  1357. if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
  1358. switch(m->queue_mode) {
  1359. case DM_TYPE_BIO_BASED:
  1360. DMEMIT("queue_mode bio ");
  1361. break;
  1362. case DM_TYPE_MQ_REQUEST_BASED:
  1363. DMEMIT("queue_mode mq ");
  1364. break;
  1365. default:
  1366. WARN_ON_ONCE(true);
  1367. break;
  1368. }
  1369. }
  1370. }
  1371. if (!m->hw_handler_name || type == STATUSTYPE_INFO)
  1372. DMEMIT("0 ");
  1373. else
  1374. DMEMIT("1 %s ", m->hw_handler_name);
  1375. DMEMIT("%u ", m->nr_priority_groups);
  1376. if (m->next_pg)
  1377. pg_num = m->next_pg->pg_num;
  1378. else if (m->current_pg)
  1379. pg_num = m->current_pg->pg_num;
  1380. else
  1381. pg_num = (m->nr_priority_groups ? 1 : 0);
  1382. DMEMIT("%u ", pg_num);
  1383. switch (type) {
  1384. case STATUSTYPE_INFO:
  1385. list_for_each_entry(pg, &m->priority_groups, list) {
  1386. if (pg->bypassed)
  1387. state = 'D'; /* Disabled */
  1388. else if (pg == m->current_pg)
  1389. state = 'A'; /* Currently Active */
  1390. else
  1391. state = 'E'; /* Enabled */
  1392. DMEMIT("%c ", state);
  1393. if (pg->ps.type->status)
  1394. sz += pg->ps.type->status(&pg->ps, NULL, type,
  1395. result + sz,
  1396. maxlen - sz);
  1397. else
  1398. DMEMIT("0 ");
  1399. DMEMIT("%u %u ", pg->nr_pgpaths,
  1400. pg->ps.type->info_args);
			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}
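
/*
 * Userspace control interface.  Messages handled here:
 *
 *   queue_if_no_path / fail_if_no_path
 *   disable_group <#> / enable_group <#> / switch_group <#>
 *   reinstate_path <dev> / fail_path <dev>
 */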
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, false, false);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], true);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], false);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s", argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}
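
/*
 * Hand the ioctl target to the current usable path.  If no path is
 * usable yet but I/O is being queued, kick off path initialization and
 * report -ENOTCONN so the caller can retry instead of failing outright.
 */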
static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev, fmode_t *mode)
{
	struct multipath *m = ti->private;
	struct pgpath *current_pgpath;
	int r;

	current_pgpath = lockless_dereference(m->current_pgpath);
	if (!current_pgpath)
		current_pgpath = choose_pgpath(m, 0);

	if (current_pgpath) {
		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
			*bdev = current_pgpath->path.dev->bdev;
			*mode = current_pgpath->path.dev->mode;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		else
			r = -EIO;
	}

	if (r == -ENOTCONN) {
		if (!lockless_dereference(m->current_pg)) {
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
		}
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;

	return r;
}
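
/*
 * Report every underlying path device to the callout; a nonzero return
 * from @fn stops the walk.
 */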
static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}
out:
	return ret;
}
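
/* Ask the low-level driver whether this path's request queue is busy. */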
static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying devices
 * are busy (so even if we mapped I/Os now, they would just wait on the
 * underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy"; otherwise dm core
 * won't hand us the I/Os and we can't do what we want with them.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg, *next_pg;
	struct pgpath *pgpath;

	/* pg_init in progress */
	if (atomic_read(&m->pg_init_in_progress))
		return true;

	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
	if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);

	/* Guess which priority_group will be used at next mapping time */
	pg = lockless_dereference(m->current_pg);
	next_pg = lockless_dereference(m->next_pg);
	if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
		pg = next_pg;

	if (!pg) {
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call choose_pgpath() here to avoid triggering
		 * pg_init just from a busy check.
		 * So we don't know whether the underlying devices we will
		 * be using at next mapping time are busy or not.  Just try
		 * mapping.
		 */
		return busy;
	}

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it.  So we consider such a pg as not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}
	}

	if (!has_active) {
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine that.
		 */
		busy = false;
	}

	return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
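/*
 * Both sets of I/O hooks are registered: .clone_and_map_rq/.rq_end_io
 * serve request-based tables, .map/.end_io serve bio-based ones; dm
 * core invokes whichever set matches the table's queue_mode.
 */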
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 12, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.map = multipath_map_bio,
	.end_io = multipath_end_io_bio,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};
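
/*
 * Set-up order: register the target, then create the two workqueues;
 * the error labels unwind in exactly the reverse order.
 */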
static int __init dm_multipath_init(void)
{
	int r;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("request-based register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading the existing workqueue.  Overloading the
	 * old workqueue would also create a bottleneck in the path
	 * of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	return 0;

bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	dm_unregister_target(&multipath_target);
bad_register_target:
	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);
	dm_unregister_target(&multipath_target);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");