/*
 * Copyright (C) 2011
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * This file is part of the objects raid engine (ore).
 *
 * It is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * You should have received a copy of the GNU General Public License
 * along with "ore". If not, write to the Free Software Foundation, Inc:
 *	"Free Software Foundation <info@fsf.org>"
 */

#include <linux/gfp.h>
#include <linux/async_tx.h>

#include "ore_raid.h"

#undef ORE_DBGMSG2
#define ORE_DBGMSG2 ORE_DBGMSG

static struct page *_raid_page_alloc(void)
{
	return alloc_page(GFP_KERNEL);
}

static void _raid_page_free(struct page *p)
{
	__free_page(p);
}

/* This struct is forward declared in ore_io_state, but is private to here.
 * It is put on ios->sp2d for RAID5/6 writes only. See _gen_xor_unit.
 *
 * __stripe_pages_2d is a 2d array of pages, and it is also a corner turn.
 * Ascending page index access is sp2d(p-minor, c-major). But storage is
 * sp2d[p-minor][c-major], so it can be properly presented to the async-xor
 * API.
 */
struct __stripe_pages_2d {
	/* Cache some hot path repeated calculations */
	unsigned parity;
	unsigned data_devs;
	unsigned pages_in_unit;

	bool needed;

	/* Array size is pages_in_unit (layout->stripe_unit / PAGE_SIZE) */
	struct __1_page_stripe {
		bool alloc;
		unsigned write_count;
		struct async_submit_ctl submit;
		struct dma_async_tx_descriptor *tx;

		/* The size of this array is data_devs + parity */
		struct page **pages;
		struct page **scribble;
		/* bool array, size of this array is data_devs */
		char *page_is_read;
	} _1p_stripes[];
};

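/* Worked example (illustrative, with assumed numbers): with stripe_unit=64K
 * and PAGE_SIZE=4K, pages_in_unit=16. For a group_width=10, parity=1
 * layout, row p of the corner turn gathers the p'th page of every
 * component's unit:
 *
 *	struct page **row = sp2d->_1p_stripes[p].pages;
 *
 * row[0..8] then hold the p'th data page of components 0..8 and row[9] the
 * p'th parity page, which is exactly the src_list shape async_xor() wants.
 */
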
/* This can get bigger than a page. So support multiple page allocations.
 * _sp2d_free should be called even if _sp2d_alloc fails (by returning
 * non-zero).
 */
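/* Sizing sketch (example numbers, not enforced at build time): on a 64-bit
 * build each __alloc_1p_arrays entry below is
 *
 *	2 * group_width * sizeof(struct page *) + data_devs
 *
 * bytes plus padding; ~176 bytes for group_width=10, parity=1. The header
 * plus pages_in_unit such entries can exceed PAGE_SIZE, in which case only
 * the entries that fit ride in the first kzalloc and the rest are allocated
 * in PAGE_SIZE-bounded chunks inside the loop, with ->alloc marking each
 * chunk's first row for kfree.
 */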
static int _sp2d_alloc(unsigned pages_in_unit, unsigned group_width,
		       unsigned parity, struct __stripe_pages_2d **psp2d)
{
	struct __stripe_pages_2d *sp2d;
	unsigned data_devs = group_width - parity;
	struct _alloc_all_bytes {
		struct __alloc_stripe_pages_2d {
			struct __stripe_pages_2d sp2d;
			struct __1_page_stripe _1p_stripes[pages_in_unit];
		} __asp2d;
		struct __alloc_1p_arrays {
			struct page *pages[group_width];
			struct page *scribble[group_width];
			char page_is_read[data_devs];
		} __a1pa[pages_in_unit];
	} *_aab;
	struct __alloc_1p_arrays *__a1pa;
	struct __alloc_1p_arrays *__a1pa_end;
	const unsigned sizeof__a1pa = sizeof(_aab->__a1pa[0]);
	unsigned num_a1pa, alloc_size, i;

	/* FIXME: check these numbers in ore_verify_layout */
	BUG_ON(sizeof(_aab->__asp2d) > PAGE_SIZE);
	BUG_ON(sizeof__a1pa > PAGE_SIZE);

	if (sizeof(*_aab) > PAGE_SIZE) {
		num_a1pa = (PAGE_SIZE - sizeof(_aab->__asp2d)) / sizeof__a1pa;
		alloc_size = sizeof(_aab->__asp2d) + sizeof__a1pa * num_a1pa;
	} else {
		num_a1pa = pages_in_unit;
		alloc_size = sizeof(*_aab);
	}

	_aab = kzalloc(alloc_size, GFP_KERNEL);
	if (unlikely(!_aab)) {
		ORE_DBGMSG("!! Failed to alloc sp2d size=%d\n", alloc_size);
		return -ENOMEM;
	}

	sp2d = &_aab->__asp2d.sp2d;
	*psp2d = sp2d; /* From here on, just call _sp2d_free */

	__a1pa = _aab->__a1pa;
	__a1pa_end = __a1pa + num_a1pa;

	for (i = 0; i < pages_in_unit; ++i) {
		if (unlikely(__a1pa >= __a1pa_end)) {
			num_a1pa = min_t(unsigned, PAGE_SIZE / sizeof__a1pa,
					 pages_in_unit - i);

			__a1pa = kzalloc(num_a1pa * sizeof__a1pa, GFP_KERNEL);
			if (unlikely(!__a1pa)) {
				ORE_DBGMSG("!! Failed to _alloc_1p_arrays=%d\n",
					   num_a1pa);
				return -ENOMEM;
			}
			__a1pa_end = __a1pa + num_a1pa;
			/* First *pages is marked for kfree of the buffer */
			sp2d->_1p_stripes[i].alloc = true;
		}

		sp2d->_1p_stripes[i].pages = __a1pa->pages;
		sp2d->_1p_stripes[i].scribble = __a1pa->scribble;
		sp2d->_1p_stripes[i].page_is_read = __a1pa->page_is_read;
		++__a1pa;
	}

	sp2d->parity = parity;
	sp2d->data_devs = data_devs;
	sp2d->pages_in_unit = pages_in_unit;
	return 0;
}

static void _sp2d_reset(struct __stripe_pages_2d *sp2d,
			const struct _ore_r4w_op *r4w, void *priv)
{
	unsigned data_devs = sp2d->data_devs;
	unsigned group_width = data_devs + sp2d->parity;
	int p, c;

	if (!sp2d->needed)
		return;

	for (c = data_devs - 1; c >= 0; --c)
		for (p = sp2d->pages_in_unit - 1; p >= 0; --p) {
			struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];

			if (_1ps->page_is_read[c]) {
				struct page *page = _1ps->pages[c];

				r4w->put_page(priv, page);
				_1ps->page_is_read[c] = false;
			}
		}

	for (p = 0; p < sp2d->pages_in_unit; p++) {
		struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];

		memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages));
		_1ps->write_count = 0;
		_1ps->tx = NULL;
	}

	sp2d->needed = false;
}

static void _sp2d_free(struct __stripe_pages_2d *sp2d)
{
	unsigned i;

	if (!sp2d)
		return;

	for (i = 0; i < sp2d->pages_in_unit; ++i) {
		if (sp2d->_1p_stripes[i].alloc)
			kfree(sp2d->_1p_stripes[i].pages);
	}

	kfree(sp2d);
}

static unsigned _sp2d_min_pg(struct __stripe_pages_2d *sp2d)
{
	unsigned p;

	for (p = 0; p < sp2d->pages_in_unit; p++) {
		struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];

		if (_1ps->write_count)
			return p;
	}

	return ~0;
}

static unsigned _sp2d_max_pg(struct __stripe_pages_2d *sp2d)
{
	int p;

	for (p = sp2d->pages_in_unit - 1; p >= 0; --p) {
		struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];

		if (_1ps->write_count)
			return p;
	}

	return ~0;
}

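/* What a single row's xor amounts to, as a plain-C sketch (the real work is
 * delegated to async_xor() below, which may run on an offload engine):
 *
 *	u8 *par = page_address(_1ps->pages[sp2d->data_devs]);
 *	unsigned c, i;
 *
 *	for (i = 0; i < PAGE_SIZE; i++) {
 *		u8 x = 0;
 *
 *		for (c = 0; c < sp2d->data_devs; c++)
 *			x ^= ((u8 *)page_address(_1ps->pages[c]))[i];
 *		par[i] = x;
 *	}
 */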
static void _gen_xor_unit(struct __stripe_pages_2d *sp2d)
{
	unsigned p;

	for (p = 0; p < sp2d->pages_in_unit; p++) {
		struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];

		if (!_1ps->write_count)
			continue;

		init_async_submit(&_1ps->submit,
				  ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
				  NULL, NULL, NULL,
				  (addr_conv_t *)_1ps->scribble);

		/* TODO: raid6 */
		_1ps->tx = async_xor(_1ps->pages[sp2d->data_devs], _1ps->pages,
				     0, sp2d->data_devs, PAGE_SIZE,
				     &_1ps->submit);
	}

	for (p = 0; p < sp2d->pages_in_unit; p++) {
		struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
		/* NOTE: We wait for HW synchronously (I don't have such HW
		 * to test with.) Is parallelism needed with today's multi
		 * cores?
		 */
		async_tx_issue_pending(_1ps->tx);
	}
}

void _ore_add_stripe_page(struct __stripe_pages_2d *sp2d,
			  struct ore_striping_info *si, struct page *page)
{
	struct __1_page_stripe *_1ps;

	sp2d->needed = true;

	_1ps = &sp2d->_1p_stripes[si->cur_pg];
	_1ps->pages[si->cur_comp] = page;
	++_1ps->write_count;

	si->cur_pg = (si->cur_pg + 1) % sp2d->pages_in_unit;
	/* si->cur_comp is advanced outside at main loop */
}

void _ore_add_sg_seg(struct ore_per_dev_state *per_dev, unsigned cur_len,
		     bool not_last)
{
	struct osd_sg_entry *sge;

	ORE_DBGMSG("dev=%d cur_len=0x%x not_last=%d cur_sg=%d "
		   "offset=0x%llx length=0x%x last_sgs_total=0x%x\n",
		   per_dev->dev, cur_len, not_last, per_dev->cur_sg,
		   _LLU(per_dev->offset), per_dev->length,
		   per_dev->last_sgs_total);

	if (!per_dev->cur_sg) {
		sge = per_dev->sglist;

		/* First time we prepare two entries */
		if (per_dev->length) {
			++per_dev->cur_sg;
			sge->offset = per_dev->offset;
			sge->len = per_dev->length;
		} else {
			/* Here the parity is the first unit of this object.
			 * This happens every time we reach a parity device on
			 * the same stripe as the per_dev->offset. We need to
			 * just skip this unit.
			 */
			per_dev->offset += cur_len;
			return;
		}
	} else {
		/* finalize the last one */
		sge = &per_dev->sglist[per_dev->cur_sg - 1];
		sge->len = per_dev->length - per_dev->last_sgs_total;
	}

	if (not_last) {
		/* Partly prepare the next one */
		struct osd_sg_entry *next_sge = sge + 1;

		++per_dev->cur_sg;
		next_sge->offset = sge->offset + sge->len + cur_len;
		/* Save cur len so we know how much was added next time */
		per_dev->last_sgs_total = per_dev->length;
		next_sge->len = 0;
	} else if (!sge->len) {
		/* Optimize for when the last unit is a parity */
		--per_dev->cur_sg;
	}
}

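/* Example call sequence (hypothetical offsets, one device): starting with
 * per_dev->length=0x1000 at offset 0x0, _ore_add_sg_seg(per_dev, 0x2000,
 * true) finalizes sglist[0] = {offset 0x0, len 0x1000} and opens sglist[1]
 * at offset 0x3000 (0x1000 plus the 0x2000 gap) with len=0. Data added
 * afterwards grows per_dev->length, and the next call sizes sglist[1] as
 * per_dev->length - last_sgs_total. A trailing zero-length entry (a gap at
 * the very end) is dropped by the --cur_sg branch above.
 */
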
static int _alloc_read_4_write(struct ore_io_state *ios)
{
	struct ore_layout *layout = ios->layout;
	int ret;
	/* We want to only read those pages not in cache so worst case
	 * is a stripe populated with every other page
	 */
	unsigned sgs_per_dev = ios->sp2d->pages_in_unit + 2;

	ret = _ore_get_io_state(layout, ios->oc,
				layout->group_width * layout->mirrors_p1,
				sgs_per_dev, 0, &ios->ios_read_4_write);
	return ret;
}

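/* Reading the headroom above with example numbers: with pages_in_unit=16 and
 * every other page cached, the read bio needs up to 8 separate data segments
 * plus a partly-prepared next entry, so pages_in_unit + 2 = 18 sg entries
 * per device bounds that comfortably. (Illustration only; the "+ 2" also
 * leaves room for the unaligned first/last page reads.)
 */
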
/* @si contains info of the to-be-inserted page. Update of @si should be
 * maintained by caller. Specifically si->dev, si->obj_offset, ...
 */
static int _add_to_r4w(struct ore_io_state *ios, struct ore_striping_info *si,
		       struct page *page, unsigned pg_len)
{
	struct request_queue *q;
	struct ore_per_dev_state *per_dev;
	struct ore_io_state *read_ios;
	unsigned first_dev = si->dev - (si->dev %
			  (ios->layout->group_width * ios->layout->mirrors_p1));
	unsigned comp = si->dev - first_dev;
	unsigned added_len;

	if (!ios->ios_read_4_write) {
		int ret = _alloc_read_4_write(ios);

		if (unlikely(ret))
			return ret;
	}

	read_ios = ios->ios_read_4_write;
	read_ios->numdevs = ios->layout->group_width * ios->layout->mirrors_p1;

	per_dev = &read_ios->per_dev[comp];
	if (!per_dev->length) {
		per_dev->bio = bio_kmalloc(GFP_KERNEL,
					   ios->sp2d->pages_in_unit);
		if (unlikely(!per_dev->bio)) {
			ORE_DBGMSG("Failed to allocate BIO size=%u\n",
				   ios->sp2d->pages_in_unit);
			return -ENOMEM;
		}
		per_dev->offset = si->obj_offset;
		per_dev->dev = si->dev;
	} else if (si->obj_offset != (per_dev->offset + per_dev->length)) {
		u64 gap = si->obj_offset - (per_dev->offset + per_dev->length);

		_ore_add_sg_seg(per_dev, gap, true);
	}
	q = osd_request_queue(ore_comp_dev(read_ios->oc, per_dev->dev));
	added_len = bio_add_pc_page(q, per_dev->bio, page, pg_len,
				    si->obj_offset % PAGE_SIZE);
	if (unlikely(added_len != pg_len)) {
		ORE_DBGMSG("Failed to bio_add_pc_page bi_vcnt=%d\n",
			   per_dev->bio->bi_vcnt);
		return -ENOMEM;
	}

	per_dev->length += pg_len;
	return 0;
}

/* read the beginning of an unaligned first page */
static int _add_to_r4w_first_page(struct ore_io_state *ios, struct page *page)
{
	struct ore_striping_info si;
	unsigned pg_len;

	ore_calc_stripe_info(ios->layout, ios->offset, 0, &si);

	pg_len = si.obj_offset % PAGE_SIZE;
	si.obj_offset -= pg_len;

	ORE_DBGMSG("offset=0x%llx len=0x%x index=0x%lx dev=%x\n",
		   _LLU(si.obj_offset), pg_len, page->index, si.dev);

	return _add_to_r4w(ios, &si, page, pg_len);
}

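/* Worked example (assumed numbers, 4K pages): if ios->offset lands 0x200
 * bytes into a page, ore_calc_stripe_info() returns si.obj_offset with those
 * same low bits, so pg_len=0x200, si.obj_offset is aligned down, and the
 * read covers bytes [0x0, 0x200) of the page, i.e. exactly the head that
 * the write will not overwrite.
 */
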
/* read the end of an incomplete last page */
static int _add_to_r4w_last_page(struct ore_io_state *ios, u64 *offset)
{
	struct ore_striping_info si;
	struct page *page;
	unsigned pg_len, p, c;

	ore_calc_stripe_info(ios->layout, *offset, 0, &si);

	p = si.unit_off / PAGE_SIZE;
	c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
		       ios->layout->mirrors_p1, si.par_dev, si.dev);
	page = ios->sp2d->_1p_stripes[p].pages[c];

	pg_len = PAGE_SIZE - (si.unit_off % PAGE_SIZE);
	*offset += pg_len;

	ORE_DBGMSG("p=%d, c=%d next-offset=0x%llx len=0x%x dev=%x par_dev=%d\n",
		   p, c, _LLU(*offset), pg_len, si.dev, si.par_dev);

	BUG_ON(!page);

	return _add_to_r4w(ios, &si, page, pg_len);
}

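/* Worked example (assumed numbers, 4K pages): for a write ending 0xe00 bytes
 * into a page, si.unit_off % PAGE_SIZE == 0xe00, so pg_len = 0x1000 - 0xe00
 * = 0x200 and the tail [0xe00, 0x1000) of the already-inserted sp2d page is
 * queued for reading; *offset advances to the next page boundary.
 */
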
static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
{
	struct bio_vec *bv;
	unsigned i, d;

	/* loop on all devices all pages */
	for (d = 0; d < ios->numdevs; d++) {
		struct bio *bio = ios->per_dev[d].bio;

		if (!bio)
			continue;

		bio_for_each_segment_all(bv, bio, i) {
			struct page *page = bv->bv_page;

			SetPageUptodate(page);
			if (PageError(page))
				ClearPageError(page);
		}
	}
}

/* read_4_write is hacked to read the start of the first stripe and/or
 * the end of the last stripe. If needed, with an sg-gap at each device/page.
 * It is assumed to be called after the to_be_written pages of the first
 * stripe are populating ios->sp2d[][].
 *
 * NOTE: We call ios->r4w->get_page for all pages needed for parity
 * calculations. These pages are held at sp2d[p].pages[c] but with
 * sp2d[p].page_is_read[c] = true. At _sp2d_reset these pages are released
 * via ios->r4w->put_page. The ios->r4w->get_page might signal that the page
 * is @uptodate=true, so we don't need to read it, only unlock, after IO.
 *
 * TODO: The read_4_write should calc a need_to_read_pages_count; if bigger
 * than the to-be-written count, we should consider the xor-in-place mode.
 * need_to_read_pages_count is the actual number of pages not present in
 * cache. Maybe "devs_in_group - ios->sp2d[p].write_count" is a good enough
 * approximation? In this mode the read pages are put in the empty places of
 * ios->sp2d[p][*], xor is calculated the same way. These pages are
 * allocated/freed and don't go through cache.
 */
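/* Example scenario (illustrative): a write covering only the middle of a
 * stripe. _read_4_write_first_stripe() walks components c=0,1,... from
 * first_stripe_start, pulling cached pages (or queueing reads) until it hits
 * the first to-be-written page; _read_4_write_last_stripe() does the mirror
 * walk from the end of the write up to the stripe end. Everything queued is
 * then submitted in one shot by _read_4_write_execute().
 */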
static int _read_4_write_first_stripe(struct ore_io_state *ios)
{
	struct ore_striping_info read_si;
	struct __stripe_pages_2d *sp2d = ios->sp2d;
	u64 offset = ios->si.first_stripe_start;
	unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;

	if (offset == ios->offset) /* Go to start collect $200 */
		goto read_last_stripe;

	min_p = _sp2d_min_pg(sp2d);
	max_p = _sp2d_max_pg(sp2d);

	ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
		   offset, ios->offset, min_p, max_p);

	for (c = 0; ; c++) {
		ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
		read_si.obj_offset += min_p * PAGE_SIZE;
		offset += min_p * PAGE_SIZE;
		for (p = min_p; p <= max_p; p++) {
			struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
			struct page **pp = &_1ps->pages[c];
			bool uptodate;

			if (*pp) {
				if (ios->offset % PAGE_SIZE)
					/* Read the remainder of the page */
					_add_to_r4w_first_page(ios, *pp);
				/* to-be-written pages start here */
				goto read_last_stripe;
			}

			*pp = ios->r4w->get_page(ios->private, offset,
						 &uptodate);
			if (unlikely(!*pp))
				return -ENOMEM;

			if (!uptodate)
				_add_to_r4w(ios, &read_si, *pp, PAGE_SIZE);

			/* Mark read-pages to be cache_released */
			_1ps->page_is_read[c] = true;
			read_si.obj_offset += PAGE_SIZE;
			offset += PAGE_SIZE;
		}
		offset += (sp2d->pages_in_unit - p) * PAGE_SIZE;
	}

read_last_stripe:
	return 0;
}

static int _read_4_write_last_stripe(struct ore_io_state *ios)
{
	struct ore_striping_info read_si;
	struct __stripe_pages_2d *sp2d = ios->sp2d;
	u64 offset;
	u64 last_stripe_end;
	unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
	unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;

	offset = ios->offset + ios->length;
	if (offset % PAGE_SIZE)
		_add_to_r4w_last_page(ios, &offset);
		/* offset will be aligned to next page */

	last_stripe_end = div_u64(offset + bytes_in_stripe - 1, bytes_in_stripe)
			  * bytes_in_stripe;
	if (offset == last_stripe_end) /* Optimize for the aligned case */
		goto read_it;

	ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
	p = read_si.unit_off / PAGE_SIZE;
	c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
		       ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);

	if (min_p == sp2d->pages_in_unit) {
		/* Didn't do it yet */
		min_p = _sp2d_min_pg(sp2d);
		max_p = _sp2d_max_pg(sp2d);
	}

	ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
		   offset, last_stripe_end, min_p, max_p);

	while (offset < last_stripe_end) {
		struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];

		if ((min_p <= p) && (p <= max_p)) {
			struct page *page;
			bool uptodate;

			BUG_ON(_1ps->pages[c]);
			page = ios->r4w->get_page(ios->private, offset,
						  &uptodate);
			if (unlikely(!page))
				return -ENOMEM;

			_1ps->pages[c] = page;
			/* Mark read-pages to be cache_released */
			_1ps->page_is_read[c] = true;
			if (!uptodate)
				_add_to_r4w(ios, &read_si, page, PAGE_SIZE);
		}

		offset += PAGE_SIZE;
		if (p == (sp2d->pages_in_unit - 1)) {
			++c;
			p = 0;
			ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
		} else {
			read_si.obj_offset += PAGE_SIZE;
			++p;
		}
	}

read_it:
	return 0;
}

static int _read_4_write_execute(struct ore_io_state *ios)
{
	struct ore_io_state *ios_read;
	unsigned i;
	int ret;

	ios_read = ios->ios_read_4_write;
	if (!ios_read)
		return 0;

	/* FIXME: Ugly to signal _sbi_read_mirror that we have bio(s). Change
	 * to check for per_dev->bio
	 */
	ios_read->pages = ios->pages;

	/* Now read these devices */
	for (i = 0; i < ios_read->numdevs; i += ios_read->layout->mirrors_p1) {
		ret = _ore_read_mirror(ios_read, i);
		if (unlikely(ret))
			return ret;
	}

	ret = ore_io_execute(ios_read); /* Synchronous execution */
	if (unlikely(ret)) {
		ORE_DBGMSG("!! ore_io_execute => %d\n", ret);
		return ret;
	}

	_mark_read4write_pages_uptodate(ios_read, ret);
	ore_put_io_state(ios_read);
	ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */
	return 0;
}

/* In writes @cur_len means length left. i.e. cur_len==0 is the last parity U */
int _ore_add_parity_unit(struct ore_io_state *ios,
			 struct ore_striping_info *si,
			 struct ore_per_dev_state *per_dev,
			 unsigned cur_len)
{
	if (ios->reading) {
		if (per_dev->cur_sg >= ios->sgs_per_dev) {
			ORE_DBGMSG("cur_sg(%d) >= sgs_per_dev(%d)\n",
				   per_dev->cur_sg, ios->sgs_per_dev);
			return -ENOMEM;
		}
		_ore_add_sg_seg(per_dev, cur_len, true);
	} else {
		struct __stripe_pages_2d *sp2d = ios->sp2d;
		struct page **pages = ios->parity_pages + ios->cur_par_page;
		unsigned num_pages;
		unsigned array_start = 0;
		unsigned i;
		int ret;

		si->cur_pg = _sp2d_min_pg(sp2d);
		num_pages  = _sp2d_max_pg(sp2d) + 1 - si->cur_pg;

		if (!cur_len) /* If last stripe operate on parity comp */
			si->cur_comp = sp2d->data_devs;

		if (!per_dev->length) {
			per_dev->offset += si->cur_pg * PAGE_SIZE;
			/* If first stripe, read in all read4write pages
			 * (if needed) before we calculate the first parity.
			 */
			_read_4_write_first_stripe(ios);
		}
		if (!cur_len) /* If last stripe r4w pages of last stripe */
			_read_4_write_last_stripe(ios);
		_read_4_write_execute(ios);

		for (i = 0; i < num_pages; i++) {
			pages[i] = _raid_page_alloc();
			if (unlikely(!pages[i]))
				return -ENOMEM;

			++(ios->cur_par_page);
		}

		BUG_ON(si->cur_comp != sp2d->data_devs);
		BUG_ON(si->cur_pg + num_pages > sp2d->pages_in_unit);

		ret = _ore_add_stripe_unit(ios, &array_start, 0, pages,
					   per_dev, num_pages * PAGE_SIZE);
		if (unlikely(ret))
			return ret;

		/* TODO: raid6 if (last_parity_dev) */
		_gen_xor_unit(sp2d);
		_sp2d_reset(sp2d, ios->r4w, ios->private);
	}
	return 0;
}

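/* Write-path sequence sketch for one stripe (RAID5, names as above):
 *
 *	_ore_add_stripe_page()  x N	- fill sp2d rows with dirty pages
 *	_ore_add_parity_unit()		- r4w reads, alloc parity pages,
 *					  _gen_xor_unit(), _sp2d_reset()
 *
 * i.e. every stripe is closed with a parity unit whose pages were just
 * xor-generated from the (read + to-be-written) data rows.
 */
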
int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
{
	if (ios->parity_pages) {
		struct ore_layout *layout = ios->layout;
		unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;

		if (_sp2d_alloc(pages_in_unit, layout->group_width,
				layout->parity, &ios->sp2d)) {
			return -ENOMEM;
		}
	}
	return 0;
}

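/* Note on the failure contract (restating the _sp2d_alloc comment): even
 * when _sp2d_alloc() returns -ENOMEM midway, *psp2d was already published
 * into ios->sp2d, so the normal teardown via _ore_free_raid_stuff() ->
 * _sp2d_free() reclaims the partial allocation; no special unwind is needed
 * here.
 */
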
void _ore_free_raid_stuff(struct ore_io_state *ios)
{
	if (ios->sp2d) { /* writing and raid */
		unsigned i;

		for (i = 0; i < ios->cur_par_page; i++) {
			struct page *page = ios->parity_pages[i];

			if (page)
				_raid_page_free(page);
		}
		if (ios->extra_part_alloc)
			kfree(ios->parity_pages);
		/* If IO returned an error pages might need unlocking */
		_sp2d_reset(ios->sp2d, ios->r4w, ios->private);
		_sp2d_free(ios->sp2d);
	} else {
		/* Will only be set if raid reading && sglist is big */
		if (ios->extra_part_alloc)
			kfree(ios->per_dev[0].sglist);
	}
	if (ios->ios_read_4_write)
		ore_put_io_state(ios->ios_read_4_write);
}