dm-io.c

/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct completion *wait;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
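
/*
 * Illustration (informational only): with DM_IO_MAX_REGIONS == BITS_PER_LONG
 * == 64 on a 64-bit build, 'struct io' is 64-byte aligned, so the low six
 * bits of its address are always zero and can carry the region number:
 *
 *	bi_private = (unsigned long)io | region;
 *	io         = bi_private & ~63UL;
 *	region     = bi_private &  63UL;
 */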

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
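/*
 * Drop one reference on the io.  When the last reference goes away, flush
 * any pending vmap invalidation, then either wake the synchronous waiter
 * (io->wait) or free the io back to its mempool and invoke the asynchronous
 * callback with the accumulated error bits.
 */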
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->vma_invalidate_size)
			invalidate_kernel_vmap_range(io->vma_invalidate_address,
						     io->vma_invalidate_size);

		if (io->wait)
			complete(io->wait);
		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
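
/*
 * Four providers fill in get_page/next_page below: a struct page_list chain
 * (list_dp_init), the bvecs of an existing bio (bio_dp_init), a vmalloc'd
 * buffer (vm_dp_init) and ordinary kernel memory (km_dp_init).
 */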

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = dp->context_ptr;

	*p = bvec->bv_page;
	*len = bvec->bv_len - dp->context_u;
	*offset = bvec->bv_offset + dp->context_u;
}

static void bio_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = dp->context_ptr;

	dp->context_ptr = bvec + 1;
	dp->context_u = 0;
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;
	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	dp->context_u = bio->bi_iter.bi_bvec_done;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
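/*
 * Issue the bios for one region.  A region may need several bios: each bio
 * is capped by the number of vecs it can hold and, for REQ_DISCARD and
 * REQ_WRITE_SAME, by the queue's max_discard_sectors/max_write_same_sectors
 * limits, so we loop until 'remaining' reaches zero.
 */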
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
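
/*
 * Synchronous variant: the struct io lives on this function's stack and we
 * sleep on a completion until every bio dispatched for it has finished.
 */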
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
	DECLARE_COMPLETION_ONSTACK(wait);

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->wait = &wait;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	wait_for_completion_io(&wait);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}
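
/*
 * Asynchronous variant: the struct io comes from the client's mempool and
 * the caller is notified via fn(error_bits, context) once all regions have
 * completed.
 */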
static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->wait = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */
	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
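
/*
 * Example (illustrative sketch only, not part of this file): a caller doing
 * a synchronous read of one 8-sector region into kernel memory.  The names
 * 'client', 'buf' and 'bdev' are hypothetical placeholders.
 *
 *	struct dm_io_region region = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw        = READ,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buf,
 *		.notify.fn    = NULL,		(a NULL notify.fn selects sync_io())
 *		.client       = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &region, &error_bits);
 */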

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}