/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm-core.h"

#define SUB_JOB_SIZE	128
#define SPLIT_COUNT	8
#define MIN_JOBS	8
#define RESERVE_PAGES	(DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
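
/*
 * Note: SUB_JOB_SIZE is in 512-byte sectors, so a sub job spans 64 KiB;
 * RESERVE_PAGES converts that byte size into pages (16 per client with
 * 4 KiB pages).
 */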

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
	struct page_list *pages;
	unsigned nr_reserved_pages;
	unsigned nr_free_pages;

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;
	atomic_t nr_jobs;

	mempool_t *job_pool;

	struct workqueue_struct *kcopyd_wq;
	struct work_struct kcopyd_work;

	struct dm_kcopyd_throttle *throttle;

	/*
	 * We maintain three lists of jobs:
	 *
	 * i)   jobs waiting for pages
	 * ii)  jobs that have pages, and are waiting for the io to be issued.
	 * iii) jobs that have completed.
	 *
	 * All three of these are protected by job_lock.
	 */
	spinlock_t job_lock;
	struct list_head complete_jobs;
	struct list_head io_jobs;
	struct list_head pages_jobs;
};

static struct page_list zero_page_list;

static DEFINE_SPINLOCK(throttle_spinlock);

/*
 * IO/IDLE accounting slowly decays after (1 << ACCOUNT_INTERVAL_SHIFT) period.
 * When total_period >= (1 << ACCOUNT_INTERVAL_SHIFT) the counters are divided
 * by 2.
 */
#define ACCOUNT_INTERVAL_SHIFT	SHIFT_HZ

/*
 * Sleep this number of milliseconds.
 *
 * The value was decided experimentally.
 * Smaller values seem to cause an increased copy rate above the limit.
 * The reason for this is unknown but possibly due to jiffies rounding errors
 * or read/write cache inside the disk.
 */
#define SLEEP_MSEC	100

/*
 * Maximum number of sleep events. There is a theoretical livelock if
 * multiple kcopyd clients do work simultaneously, which this limit avoids.
 */
#define MAX_SLEEPS	10

static void io_job_start(struct dm_kcopyd_throttle *t)
{
	unsigned throttle, now, difference;
	int slept = 0, skew;

	if (unlikely(!t))
		return;

try_again:
	spin_lock_irq(&throttle_spinlock);

	throttle = ACCESS_ONCE(t->throttle);

	if (likely(throttle >= 100))
		goto skip_limit;

	now = jiffies;
	difference = now - t->last_jiffies;
	t->last_jiffies = now;
	if (t->num_io_jobs)
		t->io_period += difference;
	t->total_period += difference;

	/*
	 * Maintain sane values if we got a temporary overflow.
	 */
	if (unlikely(t->io_period > t->total_period))
		t->io_period = t->total_period;

	if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
		int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
		t->total_period >>= shift;
		t->io_period >>= shift;
	}
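
	/*
	 * skew > 0 means the fraction of the accounting window spent doing
	 * I/O exceeds the configured percentage, i.e.
	 * io_period / total_period > throttle / 100, so back off and retry.
	 */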
	skew = t->io_period - throttle * t->total_period / 100;

	if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
		slept++;
		spin_unlock_irq(&throttle_spinlock);
		msleep(SLEEP_MSEC);
		goto try_again;
	}

skip_limit:
	t->num_io_jobs++;

	spin_unlock_irq(&throttle_spinlock);
}

static void io_job_finish(struct dm_kcopyd_throttle *t)
{
	unsigned long flags;

	if (unlikely(!t))
		return;

	spin_lock_irqsave(&throttle_spinlock, flags);

	t->num_io_jobs--;

	if (likely(ACCESS_ONCE(t->throttle) >= 100))
		goto skip_limit;

	if (!t->num_io_jobs) {
		unsigned now, difference;

		now = jiffies;
		difference = now - t->last_jiffies;
		t->last_jiffies = now;

		t->io_period += difference;
		t->total_period += difference;

		/*
		 * Maintain sane values if we got a temporary overflow.
		 */
		if (unlikely(t->io_period > t->total_period))
			t->io_period = t->total_period;
	}

skip_limit:
	spin_unlock_irqrestore(&throttle_spinlock, flags);
}

static void wake(struct dm_kcopyd_client *kc)
{
	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}

/*
 * Obtain one page for the use of kcopyd.
 */
static struct page_list *alloc_pl(gfp_t gfp)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), gfp);
	if (!pl)
		return NULL;

	pl->page = alloc_page(gfp);
	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}

/*
 * Add the provided pages to a client's free page list, releasing
 * back to the system any beyond the reserved_pages limit.
 */
static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *next;

	do {
		next = pl->next;

		if (kc->nr_free_pages >= kc->nr_reserved_pages)
			free_pl(pl);
		else {
			pl->next = kc->pages;
			kc->pages = pl;
			kc->nr_free_pages++;
		}

		pl = next;
	} while (pl);
}
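
/*
 * Pages come from a fresh no-retry, no-warn allocation first; only if
 * that fails do we dip into the client's reserve.  Keeping the reserve
 * as a fallback helps a job make forward progress under memory pressure.
 */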
static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	*pages = NULL;

	do {
		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM);
		if (unlikely(!pl)) {
			/* Use reserved pages */
			pl = kc->pages;
			if (unlikely(!pl))
				goto out_of_memory;
			kc->pages = pl->next;
			kc->nr_free_pages--;
		}
		pl->next = *pages;
		*pages = pl;
	} while (--nr);

	return 0;

out_of_memory:
	if (*pages)
		kcopyd_put_pages(kc, *pages);
	return -ENOMEM;
}

/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}

/*
 * Allocate and reserve nr_pages for the use of a specific client.
 */
static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
{
	unsigned i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr_pages; i++) {
		next = alloc_pl(GFP_KERNEL);
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kc->nr_reserved_pages += nr_pages;
	kcopyd_put_pages(kc, pl);

	return 0;
}

static void client_free_pages(struct dm_kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_reserved_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned long write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct dm_io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed. 'context' is for callback to use.
	 */
	dm_kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;
	sector_t write_offset;

	struct kcopyd_job *master_job;
};

static struct kmem_cache *_job_cache;

int __init dm_kcopyd_init(void)
{
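	/*
	 * Each slab object is sized to hold a master job immediately
	 * followed by SPLIT_COUNT sub jobs, so dm_kcopyd_copy() gets the
	 * whole array from a single mempool_alloc() and split_job() can
	 * index the sub jobs as master_job[i + 1].
	 */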
	_job_cache = kmem_cache_create("kcopyd_job",
				sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
				__alignof__(struct kcopyd_job), 0, NULL);
	if (!_job_cache)
		return -ENOMEM;
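
	/*
	 * zero_page_list is a single-entry circular list pointing at the
	 * kernel's shared zero page, so walking it can source as many
	 * zeroed pages as a request needs.
	 */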
	zero_page_list.next = &zero_page_list;
	zero_page_list.page = ZERO_PAGE(0);

	return 0;
}

void dm_kcopyd_exit(void)
{
	kmem_cache_destroy(_job_cache);
	_job_cache = NULL;
}

/*
 * Functions to push and pop a job onto the head of a given job
 * list.
 */
static struct kcopyd_job *pop_io_job(struct list_head *jobs,
				     struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job;

	/*
	 * For I/O jobs, pop any read, any write without sequential write
	 * constraint and sequential writes that are at the right position.
	 */
	list_for_each_entry(job, jobs, list) {
		if (job->rw == READ || !test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
			list_del(&job->list);
			return job;
		}
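
		/*
		 * master_job->write_offset tracks the next sector that may
		 * be written; a sequential sub job is only popped when it
		 * starts exactly there, which keeps writes in order for
		 * zoned destinations.
		 */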
		if (job->write_offset == job->master_job->write_offset) {
			job->master_job->write_offset += job->source.count;
			list_del(&job->list);
			return job;
		}
	}

	return NULL;
}

static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kc->job_lock, flags);

	if (!list_empty(jobs)) {
		if (jobs == &kc->io_jobs)
			job = pop_io_job(jobs, kc);
		else {
			job = list_entry(jobs->next, struct kcopyd_job, list);
			list_del(&job->list);
		}
	}
	spin_unlock_irqrestore(&kc->job_lock, flags);

	return job;
}

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	if (job->pages && job->pages != &zero_page_list)
		kcopyd_put_pages(kc, job->pages);

	/*
	 * If this is the master job, the sub jobs have already
	 * completed so we can free everything.
	 */
	if (job->master_job == job)
		mempool_free(job, kc->job_pool);
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}

static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	io_job_finish(kc->throttle);

	if (error) {
		if (op_is_write(job->rw))
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&kc->complete_jobs, job);
			wake(kc);
			return;
		}
	}
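
	/*
	 * A finished write completes the job; a finished read flips the
	 * job to WRITE and requeues it so the buffered pages get written
	 * out to the destinations.
	 */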
	if (op_is_write(job->rw))
		push(&kc->complete_jobs, job);
	else {
		job->rw = WRITE;
		push(&kc->io_jobs, job);
	}

	wake(kc);
}

/*
 * Request io on as many buffer heads as we can currently get for
 * a particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_op = job->rw,
		.bi_op_flags = 0,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = 0,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	/*
	 * If we need to write sequentially and some reads or writes failed,
	 * no point in continuing.
	 */
	if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
	    job->master_job->write_err)
		return -EIO;

	io_job_start(job->kc->throttle);

	if (job->rw == READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);

	return r;
}

static int run_pages_job(struct kcopyd_job *job)
{
	int r;
	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);

	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&job->kc->io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}

/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs, kc))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (op_is_write(job->rw))
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
			push(&kc->complete_jobs, job);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push_head(jobs, job);
			break;
		}

		count++;
	}

	return count;
}

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);
	struct blk_plug plug;

	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
	blk_start_plug(&plug);
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
	blk_finish_plug(&plug);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;
	atomic_inc(&kc->nr_jobs);
	if (unlikely(!job->source.count))
		push(&kc->complete_jobs, job);
	else if (job->pages == &zero_page_list)
		push(&kc->io_jobs, job);
	else
		push(&kc->pages_jobs, job);
	wake(kc);
}

static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
	struct kcopyd_job *job = sub_job->master_job;
	struct dm_kcopyd_client *kc = job->kc;

	mutex_lock(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;
		}
	}
	mutex_unlock(&job->lock);

	if (count) {
		int i;

		*sub_job = *job;
		sub_job->write_offset = progress;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = sub_job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * Queue the completion callback to the kcopyd thread.
		 *
		 * Some callers assume that all the completions are called
		 * from a single thread and don't race with each other.
		 *
		 * We must not call the callback directly here because this
		 * code may not be executing in the thread.
		 */
		push(&kc->complete_jobs, job);
		wake(kc);
	}
}

/*
 * Create some sub jobs to share the work between them.
 */
static void split_job(struct kcopyd_job *master_job)
{
	int i;

	atomic_inc(&master_job->kc->nr_jobs);
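
	/*
	 * The sub jobs live in the same slab allocation, immediately after
	 * the master (see the cache sizing in dm_kcopyd_init()).  Each is
	 * primed via segment_complete(), which carves the next SUB_JOB_SIZE
	 * chunk off the master job and dispatches it.
	 */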
	atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++) {
		master_job[i + 1].master_job = master_job;
		segment_complete(0, 0u, &master_job[i + 1]);
	}
}
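
/*
 * A minimal caller sketch (hypothetical names, not part of this file):
 * copy 1024 sectors from a source device to two mirrors and be notified
 * asynchronously from kcopyd's workqueue.
 *
 *	static void copy_done(int read_err, unsigned long write_err,
 *			      void *context)
 *	{
 *		/\* called once, after the last sub job completes *\/
 *	}
 *
 *	struct dm_io_region from = {
 *		.bdev = src_bdev, .sector = 0, .count = 1024,
 *	};
 *	struct dm_io_region to[2] = {
 *		{ .bdev = dst0_bdev, .sector = 0, .count = 1024 },
 *		{ .bdev = dst1_bdev, .sector = 0, .count = 1024 },
 *	};
 *
 *	dm_kcopyd_copy(kc, &from, 2, to, 0, copy_done, my_context);
 */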
int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		   unsigned int num_dests, struct dm_io_region *dests,
		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;
	int i;

	/*
	 * Allocate an array of jobs consisting of one master job
	 * followed by SPLIT_COUNT sub jobs.
	 */
	job = mempool_alloc(kc->job_pool, GFP_NOIO);

	/*
	 * set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	/*
	 * If one of the destinations is a host-managed zoned block device,
	 * we need to write sequentially. If one of the destinations is a
	 * host-aware device, then leave it to the caller to choose what to do.
	 */
	if (!test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
		for (i = 0; i < job->num_dests; i++) {
			if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {
				set_bit(DM_KCOPYD_WRITE_SEQ, &job->flags);
				break;
			}
		}
	}

	/*
	 * If we need to write sequentially, errors cannot be ignored.
	 */
	if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags))
		clear_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags);

	if (from) {
		job->source = *from;
		job->pages = NULL;
		job->rw = READ;
	} else {
		memset(&job->source, 0, sizeof job->source);
		job->source.count = job->dests[0].count;
		job->pages = &zero_page_list;

		/*
		 * Use WRITE ZEROES to optimize zeroing if all dests support it.
		 */
		job->rw = REQ_OP_WRITE_ZEROES;
		for (i = 0; i < job->num_dests; i++)
			if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) {
				job->rw = WRITE;
				break;
			}
	}

	job->fn = fn;
	job->context = context;
	job->master_job = job;
	job->write_offset = 0;

	if (job->source.count <= SUB_JOB_SIZE)
		dispatch_job(job);
	else {
		mutex_init(&job->lock);
		job->progress = 0;
		split_job(job);
	}

	return 0;
}
EXPORT_SYMBOL(dm_kcopyd_copy);

int dm_kcopyd_zero(struct dm_kcopyd_client *kc,
		   unsigned num_dests, struct dm_io_region *dests,
		   unsigned flags, dm_kcopyd_notify_fn fn, void *context)
{
	return dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
}
EXPORT_SYMBOL(dm_kcopyd_zero);
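
/*
 * prepare_callback/do_callback let a client route its own completion
 * through kcopyd's job lists, so the notification runs on the same
 * single kcopyd workqueue as ordinary copy completions and cannot race
 * with them.
 */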
void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
				 dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	job = mempool_alloc(kc->job_pool, GFP_NOIO);

	memset(job, 0, sizeof(struct kcopyd_job));
	job->kc = kc;
	job->fn = fn;
	job->context = context;
	job->master_job = job;

	atomic_inc(&kc->nr_jobs);

	return job;
}
EXPORT_SYMBOL(dm_kcopyd_prepare_callback);

void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
{
	struct kcopyd_job *job = j;
	struct dm_kcopyd_client *kc = job->kc;

	job->read_err = read_err;
	job->write_err = write_err;

	push(&kc->complete_jobs, job);
	wake(kc);
}
EXPORT_SYMBOL(dm_kcopyd_do_callback);

/*
 * Cancels a kcopyd job, e.g. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif /* 0 */

/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
	int r = -ENOMEM;
	struct dm_kcopyd_client *kc;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kc->job_lock);
	INIT_LIST_HEAD(&kc->complete_jobs);
	INIT_LIST_HEAD(&kc->io_jobs);
	INIT_LIST_HEAD(&kc->pages_jobs);
	kc->throttle = throttle;

	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
	if (!kc->job_pool)
		goto bad_slab;

	INIT_WORK(&kc->kcopyd_work, do_work);
	kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
	if (!kc->kcopyd_wq)
		goto bad_workqueue;

	kc->pages = NULL;
	kc->nr_reserved_pages = kc->nr_free_pages = 0;
	r = client_reserve_pages(kc, RESERVE_PAGES);
	if (r)
		goto bad_client_pages;

	kc->io_client = dm_io_client_create();
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
		goto bad_io_client;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	return kc;

bad_io_client:
	client_free_pages(kc);
bad_client_pages:
	destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
	mempool_destroy(kc->job_pool);
bad_slab:
	kfree(kc);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(dm_kcopyd_client_create);

void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	BUG_ON(!list_empty(&kc->complete_jobs));
	BUG_ON(!list_empty(&kc->io_jobs));
	BUG_ON(!list_empty(&kc->pages_jobs));
	destroy_workqueue(kc->kcopyd_wq);
	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	mempool_destroy(kc->job_pool);
	kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);