dm-bio-prison-v1.c

/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v1.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

/*----------------------------------------------------------------*/

#define MIN_CELLS 1024

struct dm_bio_prison {
        spinlock_t lock;
        mempool_t *cell_pool;
        struct rb_root cells;
};

static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/

/*
 * The cell mempool reserves MIN_CELLS cells, i.e. the number of cells
 * expected to be in use _concurrently_.  Don't confuse this with the
 * number of distinct keys.
 */
struct dm_bio_prison *dm_bio_prison_create(void)
{
        struct dm_bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);

        if (!prison)
                return NULL;

        spin_lock_init(&prison->lock);

        prison->cell_pool = mempool_create_slab_pool(MIN_CELLS, _cell_cache);
        if (!prison->cell_pool) {
                kfree(prison);
                return NULL;
        }

        prison->cells = RB_ROOT;

        return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);

void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
        mempool_destroy(prison->cell_pool);
        kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);

struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
        return mempool_alloc(prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);

void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
                             struct dm_bio_prison_cell *cell)
{
        mempool_free(cell, prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);

static void __setup_new_cell(struct dm_cell_key *key,
                             struct bio *holder,
                             struct dm_bio_prison_cell *cell)
{
        memcpy(&cell->key, key, sizeof(cell->key));
        cell->holder = holder;
        bio_list_init(&cell->bios);
}
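
/*
 * Cells live in an rb-tree ordered by key: the virtual field first, then
 * the device, then the block range.  Two keys on the same virtual/device
 * compare equal when their block ranges overlap, so the tree never holds
 * overlapping ranges.
 */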
static int cmp_keys(struct dm_cell_key *lhs,
                    struct dm_cell_key *rhs)
{
        if (lhs->virtual < rhs->virtual)
                return -1;

        if (lhs->virtual > rhs->virtual)
                return 1;

        if (lhs->dev < rhs->dev)
                return -1;

        if (lhs->dev > rhs->dev)
                return 1;

        if (lhs->block_end <= rhs->block_begin)
                return -1;

        if (lhs->block_begin >= rhs->block_end)
                return 1;

        return 0;
}

static int __bio_detain(struct dm_bio_prison *prison,
                        struct dm_cell_key *key,
                        struct bio *inmate,
                        struct dm_bio_prison_cell *cell_prealloc,
                        struct dm_bio_prison_cell **cell_result)
{
        int r;
        struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

        while (*new) {
                struct dm_bio_prison_cell *cell =
                        container_of(*new, struct dm_bio_prison_cell, node);

                r = cmp_keys(key, &cell->key);

                parent = *new;
                if (r < 0)
                        new = &((*new)->rb_left);
                else if (r > 0)
                        new = &((*new)->rb_right);
                else {
                        if (inmate)
                                bio_list_add(&cell->bios, inmate);
                        *cell_result = cell;
                        return 1;
                }
        }

        __setup_new_cell(key, inmate, cell_prealloc);
        *cell_result = cell_prealloc;

        rb_link_node(&cell_prealloc->node, parent, new);
        rb_insert_color(&cell_prealloc->node, &prison->cells);

        return 0;
}

static int bio_detain(struct dm_bio_prison *prison,
                      struct dm_cell_key *key,
                      struct bio *inmate,
                      struct dm_bio_prison_cell *cell_prealloc,
                      struct dm_bio_prison_cell **cell_result)
{
        int r;
        unsigned long flags;

        spin_lock_irqsave(&prison->lock, flags);
        r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
        spin_unlock_irqrestore(&prison->lock, flags);

        return r;
}

int dm_bio_detain(struct dm_bio_prison *prison,
                  struct dm_cell_key *key,
                  struct bio *inmate,
                  struct dm_bio_prison_cell *cell_prealloc,
                  struct dm_bio_prison_cell **cell_result)
{
        return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);

int dm_get_cell(struct dm_bio_prison *prison,
                struct dm_cell_key *key,
                struct dm_bio_prison_cell *cell_prealloc,
                struct dm_bio_prison_cell **cell_result)
{
        return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_get_cell);
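
/*
 * Sketch of the usual calling pattern for dm_bio_detain() (illustrative
 * only; build_key() and issue_remapped() are placeholder names supplied by
 * the calling target).  A cell is preallocated outside the prison lock and
 * handed back to the pool if the bio ends up queued on an existing cell:
 *
 *	struct dm_bio_prison_cell *prealloc, *cell;
 *	struct dm_cell_key key;
 *	struct bio_list bios;
 *
 *	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *	build_key(block, &key);
 *
 *	if (dm_bio_detain(prison, &key, bio, prealloc, &cell)) {
 *		// Another bio holds this range; ours was queued on the
 *		// existing cell and the prealloc was not consumed.
 *		dm_bio_prison_free_cell(prison, prealloc);
 *		return;
 *	}
 *
 *	// We are the holder: do the work.  Once it completes, release the
 *	// cell (the bios handed back in @bios still need processing by the
 *	// caller) and return the cell to the pool.
 *	issue_remapped(bio);
 *	...
 *	bio_list_init(&bios);
 *	dm_cell_release(prison, cell, &bios);
 *	dm_bio_prison_free_cell(prison, cell);
 */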

/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct dm_bio_prison *prison,
                           struct dm_bio_prison_cell *cell,
                           struct bio_list *inmates)
{
        rb_erase(&cell->node, &prison->cells);

        if (inmates) {
                if (cell->holder)
                        bio_list_add(inmates, cell->holder);
                bio_list_merge(inmates, &cell->bios);
        }
}

void dm_cell_release(struct dm_bio_prison *prison,
                     struct dm_bio_prison_cell *cell,
                     struct bio_list *bios)
{
        unsigned long flags;

        spin_lock_irqsave(&prison->lock, flags);
        __cell_release(prison, cell, bios);
        spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release);

/*
 * Sometimes we don't want the holder, just the additional bios.
 */
static void __cell_release_no_holder(struct dm_bio_prison *prison,
                                     struct dm_bio_prison_cell *cell,
                                     struct bio_list *inmates)
{
        rb_erase(&cell->node, &prison->cells);
        bio_list_merge(inmates, &cell->bios);
}

void dm_cell_release_no_holder(struct dm_bio_prison *prison,
                               struct dm_bio_prison_cell *cell,
                               struct bio_list *inmates)
{
        unsigned long flags;

        spin_lock_irqsave(&prison->lock, flags);
        __cell_release_no_holder(prison, cell, inmates);
        spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

void dm_cell_error(struct dm_bio_prison *prison,
                   struct dm_bio_prison_cell *cell, int error)
{
        struct bio_list bios;
        struct bio *bio;

        bio_list_init(&bios);
        dm_cell_release(prison, cell, &bios);

        while ((bio = bio_list_pop(&bios))) {
                bio->bi_error = error;
                bio_endio(bio);
        }
}
EXPORT_SYMBOL_GPL(dm_cell_error);

void dm_cell_visit_release(struct dm_bio_prison *prison,
                           void (*visit_fn)(void *, struct dm_bio_prison_cell *),
                           void *context,
                           struct dm_bio_prison_cell *cell)
{
        unsigned long flags;

        spin_lock_irqsave(&prison->lock, flags);
        visit_fn(context, cell);
        rb_erase(&cell->node, &prison->cells);
        spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);
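
/*
 * dm_cell_promote_or_release() below is for a holder that has finished
 * with a detained range but may have other bios queued behind it: if the
 * cell is empty it is removed from the prison and 1 is returned (the
 * caller then typically frees the cell); otherwise the next queued bio is
 * promoted to holder and 0 is returned.  A sketch (continue_as_holder()
 * is a placeholder name):
 *
 *	if (dm_cell_promote_or_release(prison, cell))
 *		dm_bio_prison_free_cell(prison, cell);
 *	else
 *		continue_as_holder(cell->holder);
 */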
static int __promote_or_release(struct dm_bio_prison *prison,
                                struct dm_bio_prison_cell *cell)
{
        if (bio_list_empty(&cell->bios)) {
                rb_erase(&cell->node, &prison->cells);
                return 1;
        }

        cell->holder = bio_list_pop(&cell->bios);
        return 0;
}

int dm_cell_promote_or_release(struct dm_bio_prison *prison,
                               struct dm_bio_prison_cell *cell)
{
        int r;
        unsigned long flags;

        spin_lock_irqsave(&prison->lock, flags);
        r = __promote_or_release(prison, cell);
        spin_unlock_irqrestore(&prison->lock, flags);

        return r;
}
EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);

/*----------------------------------------------------------------*/
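
/*
 * A deferred set is a small ring of entries.  dm_deferred_entry_inc()
 * counts an in-flight operation against the current entry and
 * dm_deferred_entry_dec() drops that count again.  Work queued with
 * dm_deferred_set_add_work() sits on the current entry and is only handed
 * back (via __sweep()) once every entry from the sweeper up to and
 * including it has drained to a zero count, i.e. once all operations that
 * were in flight when the work was added have completed.
 */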
#define DEFERRED_SET_SIZE 64

struct dm_deferred_entry {
        struct dm_deferred_set *ds;
        unsigned count;
        struct list_head work_items;
};

struct dm_deferred_set {
        spinlock_t lock;
        unsigned current_entry;
        unsigned sweeper;
        struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

struct dm_deferred_set *dm_deferred_set_create(void)
{
        int i;
        struct dm_deferred_set *ds;

        ds = kmalloc(sizeof(*ds), GFP_KERNEL);
        if (!ds)
                return NULL;

        spin_lock_init(&ds->lock);
        ds->current_entry = 0;
        ds->sweeper = 0;
        for (i = 0; i < DEFERRED_SET_SIZE; i++) {
                ds->entries[i].ds = ds;
                ds->entries[i].count = 0;
                INIT_LIST_HEAD(&ds->entries[i].work_items);
        }

        return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);

void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
        kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
        unsigned long flags;
        struct dm_deferred_entry *entry;

        spin_lock_irqsave(&ds->lock, flags);
        entry = ds->entries + ds->current_entry;
        entry->count++;
        spin_unlock_irqrestore(&ds->lock, flags);

        return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);

static unsigned ds_next(unsigned index)
{
        return (index + 1) % DEFERRED_SET_SIZE;
}
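
/*
 * Walks from the sweeper towards the current entry, splicing the work
 * items of each drained (zero count) entry onto @head, and stops at the
 * first entry that still has operations outstanding.
 */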
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
        while ((ds->sweeper != ds->current_entry) &&
               !ds->entries[ds->sweeper].count) {
                list_splice_init(&ds->entries[ds->sweeper].work_items, head);
                ds->sweeper = ds_next(ds->sweeper);
        }

        if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
                list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
        unsigned long flags;

        spin_lock_irqsave(&entry->ds->lock, flags);
        BUG_ON(!entry->count);
        --entry->count;
        __sweep(entry->ds, head);
        spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);

/*
 * Returns 1 if the work was deferred, or 0 if there are no pending items
 * so the job may be run immediately.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
        int r = 1;
        unsigned long flags;
        unsigned next_entry;

        spin_lock_irqsave(&ds->lock, flags);
        if ((ds->sweeper == ds->current_entry) &&
            !ds->entries[ds->current_entry].count)
                r = 0;
        else {
                list_add(work, &ds->entries[ds->current_entry].work_items);
                next_entry = ds_next(ds->current_entry);
                if (!ds->entries[next_entry].count)
                        ds->current_entry = next_entry;
        }
        spin_unlock_irqrestore(&ds->lock, flags);

        return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);
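
/*
 * Sketch of how the deferred set fits together (illustrative only;
 * issue_io(), process_released(), run_job() and commit_job are placeholder
 * names).  I/O bumps the current entry for as long as it is in flight, and
 * work that must wait for all currently in-flight I/O is queued with
 * dm_deferred_set_add_work():
 *
 *	// I/O submission
 *	entry = dm_deferred_entry_inc(ds);
 *	issue_io(bio);
 *
 *	// I/O completion
 *	LIST_HEAD(released);
 *	dm_deferred_entry_dec(entry, &released);
 *	process_released(&released);
 *
 *	// queueing dependent work, e.g. before a metadata commit
 *	if (!dm_deferred_set_add_work(ds, &commit_job->list))
 *		run_job(commit_job);	// nothing in flight, run immediately
 */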

/*----------------------------------------------------------------*/

static int __init dm_bio_prison_init_v1(void)
{
        _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
        if (!_cell_cache)
                return -ENOMEM;

        return 0;
}

static void dm_bio_prison_exit_v1(void)
{
        kmem_cache_destroy(_cell_cache);
        _cell_cache = NULL;
}

static int (*_inits[])(void) __initdata = {
        dm_bio_prison_init_v1,
        dm_bio_prison_init_v2,
};

static void (*_exits[])(void) = {
        dm_bio_prison_exit_v1,
        dm_bio_prison_exit_v2,
};

static int __init dm_bio_prison_init(void)
{
        const int count = ARRAY_SIZE(_inits);
        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

bad:
        while (i--)
                _exits[i]();

        return r;
}

static void __exit dm_bio_prison_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();
}

/*
 * module hooks
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");