raid56.c

  1. /*
  2. * Copyright (C) 2012 Fusion-io All rights reserved.
  3. * Copyright (C) 2012 Intel Corp. All rights reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public
  7. * License v2 as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public
  15. * License along with this program; if not, write to the
  16. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  17. * Boston, MA 021110-1307, USA.
  18. */
  19. #include <linux/sched.h>
  20. #include <linux/wait.h>
  21. #include <linux/bio.h>
  22. #include <linux/slab.h>
  23. #include <linux/buffer_head.h>
  24. #include <linux/blkdev.h>
  25. #include <linux/random.h>
  26. #include <linux/iocontext.h>
  27. #include <linux/capability.h>
  28. #include <linux/ratelimit.h>
  29. #include <linux/kthread.h>
  30. #include <linux/raid/pq.h>
  31. #include <linux/hash.h>
  32. #include <linux/list_sort.h>
  33. #include <linux/raid/xor.h>
  34. #include <linux/vmalloc.h>
  35. #include <asm/div64.h>
  36. #include "ctree.h"
  37. #include "extent_map.h"
  38. #include "disk-io.h"
  39. #include "transaction.h"
  40. #include "print-tree.h"
  41. #include "volumes.h"
  42. #include "raid56.h"
  43. #include "async-thread.h"
  44. #include "check-integrity.h"
  45. #include "rcu-string.h"
  46. /* set when additional merges to this rbio are not allowed */
  47. #define RBIO_RMW_LOCKED_BIT 1
  48. /*
  49. * set when this rbio is sitting in the hash, but it is just a cache
  50. * of past RMW
  51. */
  52. #define RBIO_CACHE_BIT 2
  53. /*
  54. * set when it is safe to trust the stripe_pages for caching
  55. */
  56. #define RBIO_CACHE_READY_BIT 3
  57. #define RBIO_CACHE_SIZE 1024
  58. enum btrfs_rbio_ops {
  59. BTRFS_RBIO_WRITE,
  60. BTRFS_RBIO_READ_REBUILD,
  61. BTRFS_RBIO_PARITY_SCRUB,
  62. BTRFS_RBIO_REBUILD_MISSING,
  63. };
  64. struct btrfs_raid_bio {
  65. struct btrfs_fs_info *fs_info;
  66. struct btrfs_bio *bbio;
  67. /* while we're doing rmw on a stripe
  68. * we put it into a hash table so we can
  69. * lock the stripe and merge more rbios
  70. * into it.
  71. */
  72. struct list_head hash_list;
  73. /*
  74. * LRU list for the stripe cache
  75. */
  76. struct list_head stripe_cache;
  77. /*
  78. * for scheduling work in the helper threads
  79. */
  80. struct btrfs_work work;
  81. /*
  82. * bio list and bio_list_lock are used
  83. * to add more bios into the stripe
  84. * in hopes of avoiding the full rmw
  85. */
  86. struct bio_list bio_list;
  87. spinlock_t bio_list_lock;
  88. /* also protected by the bio_list_lock, the
  89. * plug list is used by the plugging code
  90. * to collect partial bios while plugged. The
  91. * stripe locking code also uses it to hand off
  92. * the stripe lock to the next pending IO
  93. */
  94. struct list_head plug_list;
  95. /*
  96. * flags that tell us if it is safe to
  97. * merge with this bio
  98. */
  99. unsigned long flags;
  100. /* size of each individual stripe on disk */
  101. int stripe_len;
  102. /* number of data stripes (no p/q) */
  103. int nr_data;
  104. int real_stripes;
  105. int stripe_npages;
  106. /*
  107. * set if we're doing a parity rebuild
  108. * for a read from higher up, which is handled
  109. * differently from a parity rebuild as part of
  110. * rmw
  111. */
  112. enum btrfs_rbio_ops operation;
  113. /* first bad stripe */
  114. int faila;
  115. /* second bad stripe (for raid6 use) */
  116. int failb;
  117. int scrubp;
  118. /*
  119. * number of pages needed to represent the full
  120. * stripe
  121. */
  122. int nr_pages;
  123. /*
  124. * size of all the bios in the bio_list. This
  125. * helps us decide if the rbio maps to a full
  126. * stripe or not
  127. */
  128. int bio_list_bytes;
  129. int generic_bio_cnt;
  130. atomic_t refs;
  131. atomic_t stripes_pending;
  132. atomic_t error;
  133. /*
  134. * these are two arrays of pointers. We allocate the
  135. * rbio big enough to hold them both and setup their
  136. * locations when the rbio is allocated
  137. */
  138. /* pointers to pages that we allocated for
  139. * reading/writing stripes directly from the disk (including P/Q)
  140. */
  141. struct page **stripe_pages;
  142. /*
  143. * pointers to the pages in the bio_list. Stored
  144. * here for faster lookup
  145. */
  146. struct page **bio_pages;
  147. /*
  148. * bitmap to record which horizontal stripe has data
  149. */
  150. unsigned long *dbitmap;
  151. };
  152. static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
  153. static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
  154. static void rmw_work(struct btrfs_work *work);
  155. static void read_rebuild_work(struct btrfs_work *work);
  156. static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
  157. static void async_read_rebuild(struct btrfs_raid_bio *rbio);
  158. static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
  159. static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
  160. static void __free_raid_bio(struct btrfs_raid_bio *rbio);
  161. static void index_rbio_pages(struct btrfs_raid_bio *rbio);
  162. static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
  163. static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
  164. int need_check);
  165. static void async_scrub_parity(struct btrfs_raid_bio *rbio);
  166. /*
  167. * the stripe hash table is used for locking, and to collect
  168. * bios in hopes of making a full stripe
  169. */
  170. int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
  171. {
  172. struct btrfs_stripe_hash_table *table;
  173. struct btrfs_stripe_hash_table *x;
  174. struct btrfs_stripe_hash *cur;
  175. struct btrfs_stripe_hash *h;
  176. int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
  177. int i;
  178. int table_size;
  179. if (info->stripe_hash_table)
  180. return 0;
  181. /*
  182. * The table is large, starting with order 4 and can go as high as
  183. * order 7 in case lock debugging is turned on.
  184. *
  185. * Try harder to allocate and fallback to vmalloc to lower the chance
  186. * of a failing mount.
  187. */
  188. table_size = sizeof(*table) + sizeof(*h) * num_entries;
  189. table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
  190. if (!table) {
  191. table = vzalloc(table_size);
  192. if (!table)
  193. return -ENOMEM;
  194. }
  195. spin_lock_init(&table->cache_lock);
  196. INIT_LIST_HEAD(&table->stripe_cache);
  197. h = table->table;
  198. for (i = 0; i < num_entries; i++) {
  199. cur = h + i;
  200. INIT_LIST_HEAD(&cur->hash_list);
  201. spin_lock_init(&cur->lock);
  202. init_waitqueue_head(&cur->wait);
  203. }
  204. x = cmpxchg(&info->stripe_hash_table, NULL, table);
  205. if (x)
  206. kvfree(x);
  207. return 0;
  208. }
  209. /*
  210. * caching an rbio means to copy anything from the
  211. * bio_pages array into the stripe_pages array. We
  212. * use the page uptodate bit in the stripe cache array
  213. * to indicate if it has valid data
  214. *
  215. * once the caching is done, we set the cache ready
  216. * bit.
  217. */
  218. static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
  219. {
  220. int i;
  221. char *s;
  222. char *d;
  223. int ret;
  224. ret = alloc_rbio_pages(rbio);
  225. if (ret)
  226. return;
  227. for (i = 0; i < rbio->nr_pages; i++) {
  228. if (!rbio->bio_pages[i])
  229. continue;
  230. s = kmap(rbio->bio_pages[i]);
  231. d = kmap(rbio->stripe_pages[i]);
  232. memcpy(d, s, PAGE_CACHE_SIZE);
  233. kunmap(rbio->bio_pages[i]);
  234. kunmap(rbio->stripe_pages[i]);
  235. SetPageUptodate(rbio->stripe_pages[i]);
  236. }
  237. set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
  238. }
  239. /*
  240. * we hash on the first logical address of the stripe
  241. */
  242. static int rbio_bucket(struct btrfs_raid_bio *rbio)
  243. {
  244. u64 num = rbio->bbio->raid_map[0];
  245. /*
  246. * we shift down quite a bit. We're using byte
  247. * addressing, and most of the lower bits are zeros.
  248. * This tends to upset hash_64, and it consistently
  249. * returns just one or two different values.
  250. *
  251. * shifting off the lower bits fixes things.
  252. */
  253. return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
  254. }
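/*
 * Worked example (illustrative, assuming the usual 64K stripe_len): full
 * stripes starting at logical bytes 0 and 65536 have raid_map[0] values of
 * 0 and 0x10000; after the >> 16 they feed 0 and 1 into hash_64() and tend
 * to land in different buckets, instead of piling onto the few buckets that
 * the always-zero low bits would otherwise select.
 */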
  255. /*
  256. * stealing an rbio means taking all the uptodate pages from the stripe
  257. * array in the source rbio and putting them into the destination rbio
  258. */
  259. static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
  260. {
  261. int i;
  262. struct page *s;
  263. struct page *d;
  264. if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
  265. return;
  266. for (i = 0; i < dest->nr_pages; i++) {
  267. s = src->stripe_pages[i];
  268. if (!s || !PageUptodate(s)) {
  269. continue;
  270. }
  271. d = dest->stripe_pages[i];
  272. if (d)
  273. __free_page(d);
  274. dest->stripe_pages[i] = s;
  275. src->stripe_pages[i] = NULL;
  276. }
  277. }
  278. /*
  279. * merging means we take the bio_list from the victim and
  280. * splice it into the destination. The victim should
  281. * be discarded afterwards.
  282. *
  283. * must be called with dest->bio_list_lock held
  284. */
  285. static void merge_rbio(struct btrfs_raid_bio *dest,
  286. struct btrfs_raid_bio *victim)
  287. {
  288. bio_list_merge(&dest->bio_list, &victim->bio_list);
  289. dest->bio_list_bytes += victim->bio_list_bytes;
  290. dest->generic_bio_cnt += victim->generic_bio_cnt;
  291. bio_list_init(&victim->bio_list);
  292. }
  293. /*
  294. * used to prune items that are in the cache. The caller
  295. * must hold the hash table lock.
  296. */
  297. static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
  298. {
  299. int bucket = rbio_bucket(rbio);
  300. struct btrfs_stripe_hash_table *table;
  301. struct btrfs_stripe_hash *h;
  302. int freeit = 0;
  303. /*
  304. * check the bit again under the hash table lock.
  305. */
  306. if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
  307. return;
  308. table = rbio->fs_info->stripe_hash_table;
  309. h = table->table + bucket;
  310. /* hold the lock for the bucket because we may be
  311. * removing it from the hash table
  312. */
  313. spin_lock(&h->lock);
  314. /*
  315. * hold the lock for the bio list because we need
  316. * to make sure the bio list is empty
  317. */
  318. spin_lock(&rbio->bio_list_lock);
  319. if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
  320. list_del_init(&rbio->stripe_cache);
  321. table->cache_size -= 1;
  322. freeit = 1;
  323. /* if the bio list isn't empty, this rbio is
  324. * still involved in an IO. We take it out
  325. * of the cache list, and drop the ref that
  326. * was held for the list.
  327. *
  328. * If the bio_list was empty, we also remove
  329. * the rbio from the hash_table, and drop
  330. * the corresponding ref
  331. */
  332. if (bio_list_empty(&rbio->bio_list)) {
  333. if (!list_empty(&rbio->hash_list)) {
  334. list_del_init(&rbio->hash_list);
  335. atomic_dec(&rbio->refs);
  336. BUG_ON(!list_empty(&rbio->plug_list));
  337. }
  338. }
  339. }
  340. spin_unlock(&rbio->bio_list_lock);
  341. spin_unlock(&h->lock);
  342. if (freeit)
  343. __free_raid_bio(rbio);
  344. }
  345. /*
  346. * prune a given rbio from the cache
  347. */
  348. static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
  349. {
  350. struct btrfs_stripe_hash_table *table;
  351. unsigned long flags;
  352. if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
  353. return;
  354. table = rbio->fs_info->stripe_hash_table;
  355. spin_lock_irqsave(&table->cache_lock, flags);
  356. __remove_rbio_from_cache(rbio);
  357. spin_unlock_irqrestore(&table->cache_lock, flags);
  358. }
  359. /*
  360. * remove everything in the cache
  361. */
  362. static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
  363. {
  364. struct btrfs_stripe_hash_table *table;
  365. unsigned long flags;
  366. struct btrfs_raid_bio *rbio;
  367. table = info->stripe_hash_table;
  368. spin_lock_irqsave(&table->cache_lock, flags);
  369. while (!list_empty(&table->stripe_cache)) {
  370. rbio = list_entry(table->stripe_cache.next,
  371. struct btrfs_raid_bio,
  372. stripe_cache);
  373. __remove_rbio_from_cache(rbio);
  374. }
  375. spin_unlock_irqrestore(&table->cache_lock, flags);
  376. }
  377. /*
  378. * remove all cached entries and free the hash table
  379. * used by unmount
  380. */
  381. void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
  382. {
  383. if (!info->stripe_hash_table)
  384. return;
  385. btrfs_clear_rbio_cache(info);
  386. kvfree(info->stripe_hash_table);
  387. info->stripe_hash_table = NULL;
  388. }
  389. /*
  390. * insert an rbio into the stripe cache. It
  391. * must have already been prepared by calling
  392. * cache_rbio_pages
  393. *
  394. * If this rbio was already cached, it gets
  395. * moved to the front of the lru.
  396. *
  397. * If the size of the rbio cache is too big, we
  398. * prune an item.
  399. */
  400. static void cache_rbio(struct btrfs_raid_bio *rbio)
  401. {
  402. struct btrfs_stripe_hash_table *table;
  403. unsigned long flags;
  404. if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
  405. return;
  406. table = rbio->fs_info->stripe_hash_table;
  407. spin_lock_irqsave(&table->cache_lock, flags);
  408. spin_lock(&rbio->bio_list_lock);
  409. /* bump our ref if we were not in the list before */
  410. if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
  411. atomic_inc(&rbio->refs);
  412. if (!list_empty(&rbio->stripe_cache)){
  413. list_move(&rbio->stripe_cache, &table->stripe_cache);
  414. } else {
  415. list_add(&rbio->stripe_cache, &table->stripe_cache);
  416. table->cache_size += 1;
  417. }
  418. spin_unlock(&rbio->bio_list_lock);
  419. if (table->cache_size > RBIO_CACHE_SIZE) {
  420. struct btrfs_raid_bio *found;
  421. found = list_entry(table->stripe_cache.prev,
  422. struct btrfs_raid_bio,
  423. stripe_cache);
  424. if (found != rbio)
  425. __remove_rbio_from_cache(found);
  426. }
  427. spin_unlock_irqrestore(&table->cache_lock, flags);
  428. }
  429. /*
  430. * helper function to run the xor_blocks api. It is only
  431. * able to do MAX_XOR_BLOCKS at a time, so we need to
  432. * loop through.
  433. */
  434. static void run_xor(void **pages, int src_cnt, ssize_t len)
  435. {
  436. int src_off = 0;
  437. int xor_src_cnt = 0;
  438. void *dest = pages[src_cnt];
  439. while(src_cnt > 0) {
  440. xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
  441. xor_blocks(xor_src_cnt, len, dest, pages + src_off);
  442. src_cnt -= xor_src_cnt;
  443. src_off += xor_src_cnt;
  444. }
  445. }
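/*
 * Calling convention note: the destination buffer is pages[src_cnt], i.e.
 * one slot past the last source pointer.  finish_rmw() below relies on this
 * for the raid5 case by first copying the first data page into the P page
 * and then calling run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE) so
 * the remaining data pages are xor'd into that P page.
 */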
  446. /*
  447. * returns true if the bio list inside this rbio
  448. * covers an entire stripe (no rmw required).
  449. * Must be called with the bio list lock held, or
  450. * at a time when you know it is impossible to add
  451. * new bios into the list
  452. */
  453. static int __rbio_is_full(struct btrfs_raid_bio *rbio)
  454. {
  455. unsigned long size = rbio->bio_list_bytes;
  456. int ret = 1;
  457. if (size != rbio->nr_data * rbio->stripe_len)
  458. ret = 0;
  459. BUG_ON(size > rbio->nr_data * rbio->stripe_len);
  460. return ret;
  461. }
  462. static int rbio_is_full(struct btrfs_raid_bio *rbio)
  463. {
  464. unsigned long flags;
  465. int ret;
  466. spin_lock_irqsave(&rbio->bio_list_lock, flags);
  467. ret = __rbio_is_full(rbio);
  468. spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
  469. return ret;
  470. }
  471. /*
  472. * returns 1 if it is safe to merge two rbios together.
  473. * The merging is safe if the two rbios correspond to
  474. * the same stripe and if they are both going in the same
  475. * direction (read vs write), and if neither one is
  476. * locked for final IO
  477. *
  478. * The caller is responsible for locking such that
  479. * rmw_locked is safe to test
  480. */
  481. static int rbio_can_merge(struct btrfs_raid_bio *last,
  482. struct btrfs_raid_bio *cur)
  483. {
  484. if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
  485. test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
  486. return 0;
  487. /*
  488. * we can't merge with cached rbios, since the
  489. * idea is that when we merge the destination
  490. * rbio is going to run our IO for us. We can
  491. * steal from cached rbio's though, other functions
  492. * handle that.
  493. */
  494. if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
  495. test_bit(RBIO_CACHE_BIT, &cur->flags))
  496. return 0;
  497. if (last->bbio->raid_map[0] !=
  498. cur->bbio->raid_map[0])
  499. return 0;
  500. /* we can't merge with different operations */
  501. if (last->operation != cur->operation)
  502. return 0;
  503. /*
  504. * We need to read the full stripe from the drive, then
  505. * check and repair the parity and write the new results.
  506. *
  507. * We're not allowed to add any new bios to the
  508. * bio list here, anyone else that wants to
  509. * change this stripe needs to do their own rmw.
  510. */
  511. if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
  512. cur->operation == BTRFS_RBIO_PARITY_SCRUB)
  513. return 0;
  514. if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
  515. cur->operation == BTRFS_RBIO_REBUILD_MISSING)
  516. return 0;
  517. return 1;
  518. }
  519. static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
  520. int index)
  521. {
  522. return stripe * rbio->stripe_npages + index;
  523. }
  524. /*
  525. * these are just the pages from the rbio array, not from anything
  526. * the FS sent down to us
  527. */
  528. static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
  529. int index)
  530. {
  531. return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
  532. }
  533. /*
  534. * helper to index into the pstripe
  535. */
  536. static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
  537. {
  538. return rbio_stripe_page(rbio, rbio->nr_data, index);
  539. }
  540. /*
  541. * helper to index into the qstripe, returns null
  542. * if there is no qstripe
  543. */
  544. static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
  545. {
  546. if (rbio->nr_data + 1 == rbio->real_stripes)
  547. return NULL;
  548. return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
  549. }
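/*
 * Index math example (illustrative, assuming 4K pages and the default 64K
 * stripe_len, so stripe_npages == 16): on a 3-device raid5 rbio nr_data is
 * 2, stripe_pages[0..15] back data stripe 0, [16..31] back data stripe 1
 * and [32..47] back the P stripe, so rbio_pstripe_page(rbio, 5) returns
 * stripe_pages[2 * 16 + 5] and rbio_qstripe_page() returns NULL because
 * nr_data + 1 == real_stripes.
 */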
  550. /*
  551. * The first stripe in the table for a logical address
  552. * has the lock. rbios are added in one of three ways:
  553. *
  554. * 1) Nobody has the stripe locked yet. The rbio is given
  555. * the lock and 0 is returned. The caller must start the IO
  556. * themselves.
  557. *
  558. * 2) Someone has the stripe locked, but we're able to merge
  559. * with the lock owner. The rbio is freed and the IO will
  560. * start automatically along with the existing rbio. 1 is returned.
  561. *
  562. * 3) Someone has the stripe locked, but we're not able to merge.
  563. * The rbio is added to the lock owner's plug list, or merged into
  564. * an rbio already on the plug list. When the lock owner unlocks,
  565. * the next rbio on the list is run and the IO is started automatically.
  566. * 1 is returned
  567. *
  568. * If we return 0, the caller still owns the rbio and must continue with
  569. * IO submission. If we return 1, the caller must assume the rbio has
  570. * already been freed.
  571. */
  572. static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
  573. {
  574. int bucket = rbio_bucket(rbio);
  575. struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
  576. struct btrfs_raid_bio *cur;
  577. struct btrfs_raid_bio *pending;
  578. unsigned long flags;
  579. DEFINE_WAIT(wait);
  580. struct btrfs_raid_bio *freeit = NULL;
  581. struct btrfs_raid_bio *cache_drop = NULL;
  582. int ret = 0;
  583. int walk = 0;
  584. spin_lock_irqsave(&h->lock, flags);
  585. list_for_each_entry(cur, &h->hash_list, hash_list) {
  586. walk++;
  587. if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
  588. spin_lock(&cur->bio_list_lock);
  589. /* can we steal this cached rbio's pages? */
  590. if (bio_list_empty(&cur->bio_list) &&
  591. list_empty(&cur->plug_list) &&
  592. test_bit(RBIO_CACHE_BIT, &cur->flags) &&
  593. !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
  594. list_del_init(&cur->hash_list);
  595. atomic_dec(&cur->refs);
  596. steal_rbio(cur, rbio);
  597. cache_drop = cur;
  598. spin_unlock(&cur->bio_list_lock);
  599. goto lockit;
  600. }
  601. /* can we merge into the lock owner? */
  602. if (rbio_can_merge(cur, rbio)) {
  603. merge_rbio(cur, rbio);
  604. spin_unlock(&cur->bio_list_lock);
  605. freeit = rbio;
  606. ret = 1;
  607. goto out;
  608. }
  609. /*
  610. * we couldn't merge with the running
  611. * rbio, see if we can merge with the
  612. * pending ones. We don't have to
  613. * check for rmw_locked because there
  614. * is no way they are inside finish_rmw
  615. * right now
  616. */
  617. list_for_each_entry(pending, &cur->plug_list,
  618. plug_list) {
  619. if (rbio_can_merge(pending, rbio)) {
  620. merge_rbio(pending, rbio);
  621. spin_unlock(&cur->bio_list_lock);
  622. freeit = rbio;
  623. ret = 1;
  624. goto out;
  625. }
  626. }
  627. /* no merging, put us on the tail of the plug list,
  628. * our rbio will be started when the currently
  629. * running rbio unlocks
  630. */
  631. list_add_tail(&rbio->plug_list, &cur->plug_list);
  632. spin_unlock(&cur->bio_list_lock);
  633. ret = 1;
  634. goto out;
  635. }
  636. }
  637. lockit:
  638. atomic_inc(&rbio->refs);
  639. list_add(&rbio->hash_list, &h->hash_list);
  640. out:
  641. spin_unlock_irqrestore(&h->lock, flags);
  642. if (cache_drop)
  643. remove_rbio_from_cache(cache_drop);
  644. if (freeit)
  645. __free_raid_bio(freeit);
  646. return ret;
  647. }
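/*
 * Typical caller pattern (a sketch; full_stripe_write() below does exactly
 * this): a return of 0 means we hold the stripe lock and must drive the IO
 * ourselves, a return of 1 means the rbio was merged or queued and must not
 * be touched again:
 *
 *	if (lock_stripe_add(rbio) == 0)
 *		finish_rmw(rbio);
 */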
  648. /*
  649. * called as rmw or parity rebuild is completed. If the plug list has more
  650. * rbios waiting for this stripe, the next one on the list will be started
  651. */
  652. static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
  653. {
  654. int bucket;
  655. struct btrfs_stripe_hash *h;
  656. unsigned long flags;
  657. int keep_cache = 0;
  658. bucket = rbio_bucket(rbio);
  659. h = rbio->fs_info->stripe_hash_table->table + bucket;
  660. if (list_empty(&rbio->plug_list))
  661. cache_rbio(rbio);
  662. spin_lock_irqsave(&h->lock, flags);
  663. spin_lock(&rbio->bio_list_lock);
  664. if (!list_empty(&rbio->hash_list)) {
  665. /*
  666. * if we're still cached and there is no other IO
  667. * to perform, just leave this rbio here for others
  668. * to steal from later
  669. */
  670. if (list_empty(&rbio->plug_list) &&
  671. test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
  672. keep_cache = 1;
  673. clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
  674. BUG_ON(!bio_list_empty(&rbio->bio_list));
  675. goto done;
  676. }
  677. list_del_init(&rbio->hash_list);
  678. atomic_dec(&rbio->refs);
  679. /*
  680. * we use the plug list to hold all the rbios
  681. * waiting for the chance to lock this stripe.
  682. * hand the lock over to one of them.
  683. */
  684. if (!list_empty(&rbio->plug_list)) {
  685. struct btrfs_raid_bio *next;
  686. struct list_head *head = rbio->plug_list.next;
  687. next = list_entry(head, struct btrfs_raid_bio,
  688. plug_list);
  689. list_del_init(&rbio->plug_list);
  690. list_add(&next->hash_list, &h->hash_list);
  691. atomic_inc(&next->refs);
  692. spin_unlock(&rbio->bio_list_lock);
  693. spin_unlock_irqrestore(&h->lock, flags);
  694. if (next->operation == BTRFS_RBIO_READ_REBUILD)
  695. async_read_rebuild(next);
  696. else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
  697. steal_rbio(rbio, next);
  698. async_read_rebuild(next);
  699. } else if (next->operation == BTRFS_RBIO_WRITE) {
  700. steal_rbio(rbio, next);
  701. async_rmw_stripe(next);
  702. } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
  703. steal_rbio(rbio, next);
  704. async_scrub_parity(next);
  705. }
  706. goto done_nolock;
  707. /*
  708. * The barrier for this waitqueue_active is not needed,
  709. * we're protected by h->lock and can't miss a wakeup.
  710. */
  711. } else if (waitqueue_active(&h->wait)) {
  712. spin_unlock(&rbio->bio_list_lock);
  713. spin_unlock_irqrestore(&h->lock, flags);
  714. wake_up(&h->wait);
  715. goto done_nolock;
  716. }
  717. }
  718. done:
  719. spin_unlock(&rbio->bio_list_lock);
  720. spin_unlock_irqrestore(&h->lock, flags);
  721. done_nolock:
  722. if (!keep_cache)
  723. remove_rbio_from_cache(rbio);
  724. }
  725. static void __free_raid_bio(struct btrfs_raid_bio *rbio)
  726. {
  727. int i;
  728. WARN_ON(atomic_read(&rbio->refs) < 0);
  729. if (!atomic_dec_and_test(&rbio->refs))
  730. return;
  731. WARN_ON(!list_empty(&rbio->stripe_cache));
  732. WARN_ON(!list_empty(&rbio->hash_list));
  733. WARN_ON(!bio_list_empty(&rbio->bio_list));
  734. for (i = 0; i < rbio->nr_pages; i++) {
  735. if (rbio->stripe_pages[i]) {
  736. __free_page(rbio->stripe_pages[i]);
  737. rbio->stripe_pages[i] = NULL;
  738. }
  739. }
  740. btrfs_put_bbio(rbio->bbio);
  741. kfree(rbio);
  742. }
  743. static void free_raid_bio(struct btrfs_raid_bio *rbio)
  744. {
  745. unlock_stripe(rbio);
  746. __free_raid_bio(rbio);
  747. }
  748. /*
  749. * this frees the rbio and runs through all the bios in the
  750. * bio_list and calls end_io on them
  751. */
  752. static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
  753. {
  754. struct bio *cur = bio_list_get(&rbio->bio_list);
  755. struct bio *next;
  756. if (rbio->generic_bio_cnt)
  757. btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
  758. free_raid_bio(rbio);
  759. while (cur) {
  760. next = cur->bi_next;
  761. cur->bi_next = NULL;
  762. cur->bi_error = err;
  763. bio_endio(cur);
  764. cur = next;
  765. }
  766. }
  767. /*
  768. * end io function used by finish_rmw. When we finally
  769. * get here, we've written a full stripe
  770. */
  771. static void raid_write_end_io(struct bio *bio)
  772. {
  773. struct btrfs_raid_bio *rbio = bio->bi_private;
  774. int err = bio->bi_error;
  775. int max_errors;
  776. if (err)
  777. fail_bio_stripe(rbio, bio);
  778. bio_put(bio);
  779. if (!atomic_dec_and_test(&rbio->stripes_pending))
  780. return;
  781. err = 0;
  782. /* OK, we have written all the stripes we need to. */
  783. max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
  784. 0 : rbio->bbio->max_errors;
  785. if (atomic_read(&rbio->error) > max_errors)
  786. err = -EIO;
  787. rbio_orig_end_io(rbio, err);
  788. }
  789. /*
  790. * the read/modify/write code wants to use the original bio for
  791. * any pages it included, and then use the rbio for everything
  792. * else. This function decides if a given index (stripe number)
  793. * and page number in that stripe fall inside the original bio
  794. * or the rbio.
  795. *
  796. * if you set bio_list_only, you'll get a NULL back for any ranges
  797. * that are outside the bio_list
  798. *
  799. * This doesn't take any refs on anything, you get a bare page pointer
  800. * and the caller must bump refs as required.
  801. *
  802. * You must call index_rbio_pages once before you can trust
  803. * the answers from this function.
  804. */
  805. static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
  806. int index, int pagenr, int bio_list_only)
  807. {
  808. int chunk_page;
  809. struct page *p = NULL;
  810. chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
  811. spin_lock_irq(&rbio->bio_list_lock);
  812. p = rbio->bio_pages[chunk_page];
  813. spin_unlock_irq(&rbio->bio_list_lock);
  814. if (p || bio_list_only)
  815. return p;
  816. return rbio->stripe_pages[chunk_page];
  817. }
  818. /*
  819. * number of pages we need for the entire stripe across all the
  820. * drives
  821. */
  822. static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
  823. {
  824. return DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) * nr_stripes;
  825. }
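/*
 * For example (illustrative, assuming 4K pages and a 64K stripe_len): a
 * 4-device raid6 full stripe needs DIV_ROUND_UP(65536, 4096) * 4 == 64
 * pages.
 */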
  826. /*
  827. * allocation and initial setup for the btrfs_raid_bio. Note that
  828. * this does not allocate any pages for rbio->stripe_pages.
  829. */
  830. static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
  831. struct btrfs_bio *bbio, u64 stripe_len)
  832. {
  833. struct btrfs_raid_bio *rbio;
  834. int nr_data = 0;
  835. int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
  836. int num_pages = rbio_nr_pages(stripe_len, real_stripes);
  837. int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
  838. void *p;
  839. rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
  840. DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
  841. sizeof(long), GFP_NOFS);
  842. if (!rbio)
  843. return ERR_PTR(-ENOMEM);
  844. bio_list_init(&rbio->bio_list);
  845. INIT_LIST_HEAD(&rbio->plug_list);
  846. spin_lock_init(&rbio->bio_list_lock);
  847. INIT_LIST_HEAD(&rbio->stripe_cache);
  848. INIT_LIST_HEAD(&rbio->hash_list);
  849. rbio->bbio = bbio;
  850. rbio->fs_info = root->fs_info;
  851. rbio->stripe_len = stripe_len;
  852. rbio->nr_pages = num_pages;
  853. rbio->real_stripes = real_stripes;
  854. rbio->stripe_npages = stripe_npages;
  855. rbio->faila = -1;
  856. rbio->failb = -1;
  857. atomic_set(&rbio->refs, 1);
  858. atomic_set(&rbio->error, 0);
  859. atomic_set(&rbio->stripes_pending, 0);
  860. /*
  861. * the stripe_pages and bio_pages array point to the extra
  862. * memory we allocated past the end of the rbio
  863. */
  864. p = rbio + 1;
  865. rbio->stripe_pages = p;
  866. rbio->bio_pages = p + sizeof(struct page *) * num_pages;
  867. rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
  868. if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
  869. nr_data = real_stripes - 1;
  870. else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
  871. nr_data = real_stripes - 2;
  872. else
  873. BUG();
  874. rbio->nr_data = nr_data;
  875. return rbio;
  876. }
  877. /* allocate pages for all the stripes in the bio, including parity */
  878. static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
  879. {
  880. int i;
  881. struct page *page;
  882. for (i = 0; i < rbio->nr_pages; i++) {
  883. if (rbio->stripe_pages[i])
  884. continue;
  885. page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
  886. if (!page)
  887. return -ENOMEM;
  888. rbio->stripe_pages[i] = page;
  889. }
  890. return 0;
  891. }
  892. /* only allocate pages for p/q stripes */
  893. static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
  894. {
  895. int i;
  896. struct page *page;
  897. i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
  898. for (; i < rbio->nr_pages; i++) {
  899. if (rbio->stripe_pages[i])
  900. continue;
  901. page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
  902. if (!page)
  903. return -ENOMEM;
  904. rbio->stripe_pages[i] = page;
  905. }
  906. return 0;
  907. }
  908. /*
  909. * add a single page from a specific stripe into our list of bios for IO
  910. * this will try to merge into existing bios if possible, and returns
  911. * zero if all went well.
  912. */
  913. static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
  914. struct bio_list *bio_list,
  915. struct page *page,
  916. int stripe_nr,
  917. unsigned long page_index,
  918. unsigned long bio_max_len)
  919. {
  920. struct bio *last = bio_list->tail;
  921. u64 last_end = 0;
  922. int ret;
  923. struct bio *bio;
  924. struct btrfs_bio_stripe *stripe;
  925. u64 disk_start;
  926. stripe = &rbio->bbio->stripes[stripe_nr];
  927. disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);
  928. /* if the device is missing, just fail this stripe */
  929. if (!stripe->dev->bdev)
  930. return fail_rbio_index(rbio, stripe_nr);
  931. /* see if we can add this page onto our existing bio */
  932. if (last) {
  933. last_end = (u64)last->bi_iter.bi_sector << 9;
  934. last_end += last->bi_iter.bi_size;
  935. /*
  936. * we can't merge these if they are from different
  937. * devices or if they are not contiguous
  938. */
  939. if (last_end == disk_start && stripe->dev->bdev &&
  940. !last->bi_error &&
  941. last->bi_bdev == stripe->dev->bdev) {
  942. ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
  943. if (ret == PAGE_CACHE_SIZE)
  944. return 0;
  945. }
  946. }
  947. /* put a new bio on the list */
  948. bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1);
  949. if (!bio)
  950. return -ENOMEM;
  951. bio->bi_iter.bi_size = 0;
  952. bio->bi_bdev = stripe->dev->bdev;
  953. bio->bi_iter.bi_sector = disk_start >> 9;
  954. bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
  955. bio_list_add(bio_list, bio);
  956. return 0;
  957. }
  958. /*
  959. * while we're doing the read/modify/write cycle, we could
  960. * have errors in reading pages off the disk. This checks
  961. * for errors and if we're not able to read the page it'll
  962. * trigger parity reconstruction. The rmw will be finished
  963. * after we've reconstructed the failed stripes
  964. */
  965. static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
  966. {
  967. if (rbio->faila >= 0 || rbio->failb >= 0) {
  968. BUG_ON(rbio->faila == rbio->real_stripes - 1);
  969. __raid56_parity_recover(rbio);
  970. } else {
  971. finish_rmw(rbio);
  972. }
  973. }
  974. /*
  975. * helper function to walk our bio list and populate the bio_pages array with
  976. * the result. This seems expensive, but it is faster than constantly
  977. * searching through the bio list as we setup the IO in finish_rmw or stripe
  978. * reconstruction.
  979. *
  980. * This must be called before you trust the answers from page_in_rbio
  981. */
  982. static void index_rbio_pages(struct btrfs_raid_bio *rbio)
  983. {
  984. struct bio *bio;
  985. u64 start;
  986. unsigned long stripe_offset;
  987. unsigned long page_index;
  988. struct page *p;
  989. int i;
  990. spin_lock_irq(&rbio->bio_list_lock);
  991. bio_list_for_each(bio, &rbio->bio_list) {
  992. start = (u64)bio->bi_iter.bi_sector << 9;
  993. stripe_offset = start - rbio->bbio->raid_map[0];
  994. page_index = stripe_offset >> PAGE_CACHE_SHIFT;
  995. for (i = 0; i < bio->bi_vcnt; i++) {
  996. p = bio->bi_io_vec[i].bv_page;
  997. rbio->bio_pages[page_index + i] = p;
  998. }
  999. }
  1000. spin_unlock_irq(&rbio->bio_list_lock);
  1001. }
  1002. /*
  1003. * this is called from one of two situations. We either
  1004. * have a full stripe from the higher layers, or we've read all
  1005. * the missing bits off disk.
  1006. *
  1007. * This will calculate the parity and then send down any
  1008. * changed blocks.
  1009. */
  1010. static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
  1011. {
  1012. struct btrfs_bio *bbio = rbio->bbio;
  1013. void *pointers[rbio->real_stripes];
  1014. int nr_data = rbio->nr_data;
  1015. int stripe;
  1016. int pagenr;
  1017. int p_stripe = -1;
  1018. int q_stripe = -1;
  1019. struct bio_list bio_list;
  1020. struct bio *bio;
  1021. int ret;
  1022. bio_list_init(&bio_list);
  1023. if (rbio->real_stripes - rbio->nr_data == 1) {
  1024. p_stripe = rbio->real_stripes - 1;
  1025. } else if (rbio->real_stripes - rbio->nr_data == 2) {
  1026. p_stripe = rbio->real_stripes - 2;
  1027. q_stripe = rbio->real_stripes - 1;
  1028. } else {
  1029. BUG();
  1030. }
  1031. /* at this point we either have a full stripe,
  1032. * or we've read the full stripe from the drive.
  1033. * recalculate the parity and write the new results.
  1034. *
  1035. * We're not allowed to add any new bios to the
  1036. * bio list here, anyone else that wants to
  1037. * change this stripe needs to do their own rmw.
  1038. */
  1039. spin_lock_irq(&rbio->bio_list_lock);
  1040. set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
  1041. spin_unlock_irq(&rbio->bio_list_lock);
  1042. atomic_set(&rbio->error, 0);
  1043. /*
  1044. * now that we've set rmw_locked, run through the
  1045. * bio list one last time and map the page pointers
  1046. *
  1047. * We don't cache full rbios because we're assuming
  1048. * the higher layers are unlikely to use this area of
  1049. * the disk again soon. If they do use it again,
  1050. * hopefully they will send another full bio.
  1051. */
  1052. index_rbio_pages(rbio);
  1053. if (!rbio_is_full(rbio))
  1054. cache_rbio_pages(rbio);
  1055. else
  1056. clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
  1057. for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
  1058. struct page *p;
  1059. /* first collect one page from each data stripe */
  1060. for (stripe = 0; stripe < nr_data; stripe++) {
  1061. p = page_in_rbio(rbio, stripe, pagenr, 0);
  1062. pointers[stripe] = kmap(p);
  1063. }
  1064. /* then add the parity stripe */
  1065. p = rbio_pstripe_page(rbio, pagenr);
  1066. SetPageUptodate(p);
  1067. pointers[stripe++] = kmap(p);
  1068. if (q_stripe != -1) {
  1069. /*
  1070. * raid6, add the qstripe and call the
  1071. * library function to fill in our p/q
  1072. */
  1073. p = rbio_qstripe_page(rbio, pagenr);
  1074. SetPageUptodate(p);
  1075. pointers[stripe++] = kmap(p);
  1076. raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
  1077. pointers);
  1078. } else {
  1079. /* raid5 */
  1080. memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
  1081. run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
  1082. }
  1083. for (stripe = 0; stripe < rbio->real_stripes; stripe++)
  1084. kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
  1085. }
  1086. /*
  1087. * time to start writing. Make bios for everything from the
  1088. * higher layers (the bio_list in our rbio) and our p/q. Ignore
  1089. * everything else.
  1090. */
  1091. for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
  1092. for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
  1093. struct page *page;
  1094. if (stripe < rbio->nr_data) {
  1095. page = page_in_rbio(rbio, stripe, pagenr, 1);
  1096. if (!page)
  1097. continue;
  1098. } else {
  1099. page = rbio_stripe_page(rbio, stripe, pagenr);
  1100. }
  1101. ret = rbio_add_io_page(rbio, &bio_list,
  1102. page, stripe, pagenr, rbio->stripe_len);
  1103. if (ret)
  1104. goto cleanup;
  1105. }
  1106. }
  1107. if (likely(!bbio->num_tgtdevs))
  1108. goto write_data;
  1109. for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
  1110. if (!bbio->tgtdev_map[stripe])
  1111. continue;
  1112. for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
  1113. struct page *page;
  1114. if (stripe < rbio->nr_data) {
  1115. page = page_in_rbio(rbio, stripe, pagenr, 1);
  1116. if (!page)
  1117. continue;
  1118. } else {
  1119. page = rbio_stripe_page(rbio, stripe, pagenr);
  1120. }
  1121. ret = rbio_add_io_page(rbio, &bio_list, page,
  1122. rbio->bbio->tgtdev_map[stripe],
  1123. pagenr, rbio->stripe_len);
  1124. if (ret)
  1125. goto cleanup;
  1126. }
  1127. }
  1128. write_data:
  1129. atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
  1130. BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
  1131. while (1) {
  1132. bio = bio_list_pop(&bio_list);
  1133. if (!bio)
  1134. break;
  1135. bio->bi_private = rbio;
  1136. bio->bi_end_io = raid_write_end_io;
  1137. submit_bio(WRITE, bio);
  1138. }
  1139. return;
  1140. cleanup:
  1141. rbio_orig_end_io(rbio, -EIO);
  1142. }
  1143. /*
  1144. * helper to find the stripe number for a given bio. Used to figure out which
  1145. * stripe has failed. This expects the bio to correspond to a physical disk,
  1146. * so it looks up based on physical sector numbers.
  1147. */
  1148. static int find_bio_stripe(struct btrfs_raid_bio *rbio,
  1149. struct bio *bio)
  1150. {
  1151. u64 physical = bio->bi_iter.bi_sector;
  1152. u64 stripe_start;
  1153. int i;
  1154. struct btrfs_bio_stripe *stripe;
  1155. physical <<= 9;
  1156. for (i = 0; i < rbio->bbio->num_stripes; i++) {
  1157. stripe = &rbio->bbio->stripes[i];
  1158. stripe_start = stripe->physical;
  1159. if (physical >= stripe_start &&
  1160. physical < stripe_start + rbio->stripe_len &&
  1161. bio->bi_bdev == stripe->dev->bdev) {
  1162. return i;
  1163. }
  1164. }
  1165. return -1;
  1166. }
  1167. /*
  1168. * helper to find the stripe number for a given
  1169. * bio (before mapping). Used to figure out which stripe has
  1170. * failed. This looks up based on logical block numbers.
  1171. */
  1172. static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
  1173. struct bio *bio)
  1174. {
  1175. u64 logical = bio->bi_iter.bi_sector;
  1176. u64 stripe_start;
  1177. int i;
  1178. logical <<= 9;
  1179. for (i = 0; i < rbio->nr_data; i++) {
  1180. stripe_start = rbio->bbio->raid_map[i];
  1181. if (logical >= stripe_start &&
  1182. logical < stripe_start + rbio->stripe_len) {
  1183. return i;
  1184. }
  1185. }
  1186. return -1;
  1187. }
  1188. /*
  1189. * returns -EIO if we had too many failures
  1190. */
  1191. static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
  1192. {
  1193. unsigned long flags;
  1194. int ret = 0;
  1195. spin_lock_irqsave(&rbio->bio_list_lock, flags);
  1196. /* we already know this stripe is bad, move on */
  1197. if (rbio->faila == failed || rbio->failb == failed)
  1198. goto out;
  1199. if (rbio->faila == -1) {
  1200. /* first failure on this rbio */
  1201. rbio->faila = failed;
  1202. atomic_inc(&rbio->error);
  1203. } else if (rbio->failb == -1) {
  1204. /* second failure on this rbio */
  1205. rbio->failb = failed;
  1206. atomic_inc(&rbio->error);
  1207. } else {
  1208. ret = -EIO;
  1209. }
  1210. out:
  1211. spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
  1212. return ret;
  1213. }
  1214. /*
  1215. * helper to fail a stripe based on a physical disk
  1216. * bio.
  1217. */
  1218. static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
  1219. struct bio *bio)
  1220. {
  1221. int failed = find_bio_stripe(rbio, bio);
  1222. if (failed < 0)
  1223. return -EIO;
  1224. return fail_rbio_index(rbio, failed);
  1225. }
  1226. /*
  1227. * this sets each page in the bio uptodate. It should only be used on private
  1228. * rbio pages, nothing that comes in from the higher layers
  1229. */
  1230. static void set_bio_pages_uptodate(struct bio *bio)
  1231. {
  1232. int i;
  1233. struct page *p;
  1234. for (i = 0; i < bio->bi_vcnt; i++) {
  1235. p = bio->bi_io_vec[i].bv_page;
  1236. SetPageUptodate(p);
  1237. }
  1238. }
  1239. /*
  1240. * end io for the read phase of the rmw cycle. All the bios here are physical
  1241. * stripe bios we've read from the disk so we can recalculate the parity of the
  1242. * stripe.
  1243. *
  1244. * This will usually kick off finish_rmw once all the bios are read in, but it
  1245. * may trigger parity reconstruction if we had any errors along the way
  1246. */
  1247. static void raid_rmw_end_io(struct bio *bio)
  1248. {
  1249. struct btrfs_raid_bio *rbio = bio->bi_private;
  1250. if (bio->bi_error)
  1251. fail_bio_stripe(rbio, bio);
  1252. else
  1253. set_bio_pages_uptodate(bio);
  1254. bio_put(bio);
  1255. if (!atomic_dec_and_test(&rbio->stripes_pending))
  1256. return;
  1257. if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
  1258. goto cleanup;
  1259. /*
  1260. * this will normally call finish_rmw to start our write
  1261. * but if there are any failed stripes we'll reconstruct
  1262. * from parity first
  1263. */
  1264. validate_rbio_for_rmw(rbio);
  1265. return;
  1266. cleanup:
  1267. rbio_orig_end_io(rbio, -EIO);
  1268. }
  1269. static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
  1270. {
  1271. btrfs_init_work(&rbio->work, btrfs_rmw_helper,
  1272. rmw_work, NULL, NULL);
  1273. btrfs_queue_work(rbio->fs_info->rmw_workers,
  1274. &rbio->work);
  1275. }
  1276. static void async_read_rebuild(struct btrfs_raid_bio *rbio)
  1277. {
  1278. btrfs_init_work(&rbio->work, btrfs_rmw_helper,
  1279. read_rebuild_work, NULL, NULL);
  1280. btrfs_queue_work(rbio->fs_info->rmw_workers,
  1281. &rbio->work);
  1282. }
/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(READ, bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}
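/*
 * Why the read phase above gathers every missing data page: parity is
 * recomputed over the whole horizontal stripe, so any data page the
 * incoming bios don't supply (and that isn't already uptodate in the
 * stripe cache) has to come off the disk first.  An illustrative sketch
 * of the idea, in miniature (this is the same byte-wise XOR that run_xor
 * performs, not the actual helper):
 *
 *	static void parity_xor_sketch(u8 *parity, u8 **data, int ndata,
 *				      size_t len)
 *	{
 *		size_t off;
 *		int d;
 *
 *		memcpy(parity, data[0], len);
 *		for (d = 1; d < ndata; d++)
 *			for (off = 0; off < len; off++)
 *				parity[off] ^= data[d][off];
 *	}
 *
 * Every data page contributes to every parity byte in its row, which is
 * why a partial stripe write can never skip the read phase.
 */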
/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}
/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}
/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}
/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};
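/*
 * Note on the layout above: blk_check_plugged() allocates sizeof(*plug)
 * bytes and hands back a pointer to the embedded blk_plug_cb, so "cb"
 * must stay the first member for that allocation to cover the whole
 * btrfs_plug_cb.  container_of() in btrfs_raid_unplug() then recovers
 * our wrapper struct from the callback pointer the block layer gives us.
 */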
/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}
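/*
 * Example of why the sort pays off (offsets are illustrative): suppose
 * three partial writes land on the plug list in the order 1M, 0M, 0.5M
 * within one full stripe.  After list_sort() they come off the list in
 * ascending sector order, so run_plug() sees neighbouring rbios back to
 * back and rbio_can_merge()/merge_rbio() can fold them into a single
 * rbio, ideally one covering the full stripe so no read-modify-write is
 * needed at all.
 */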
static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last) {
		__raid56_parity_write(last);
	}
	kfree(plug);
}
/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;

	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;

	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}
/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(root->fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
			       sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = root->fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
	}
	return ret;
}
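/*
 * Write path summary, as implemented above:
 *
 *   full rbio                 -> full_stripe_write(): allocate parity pages
 *                                and go straight to finish_rmw()
 *   partial rbio, plug active -> parked on the per-task plug list, merged
 *                                and submitted from btrfs_raid_unplug()
 *   partial rbio, no plug     -> __raid56_parity_write(), which usually
 *                                means an async read-modify-write cycle
 */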
/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	int err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = -ENOMEM;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * When doing a parity scrub, we use the dbitmap to mark
		 * the horizontal stripes that actually contain data and
		 * skip the rest.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = -EIO;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe has failed, do a pstripe
			 * reconstruction from the xors.
			 * If both the q stripe and the P stripe have failed,
			 * we're here due to a crc mismatch and we can't give
			 * them the data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = -EIO;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_CACHE_SIZE);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
		}
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = 0;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		if (err == 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		rbio_orig_end_io(rbio, err);
	} else if (err == 0) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}
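/*
 * Background on the two raid6 recovery calls above (this is the standard
 * lib/raid6 math, not spelled out in this file).  With data blocks
 * D_0..D_{n-1} and a generator g over GF(2^8):
 *
 *	P = D_0 xor D_1 xor ... xor D_{n-1}
 *	Q = g^0*D_0 xor g^1*D_1 xor ... xor g^{n-1}*D_{n-1}
 *
 * raid6_datap_recov() handles "one data block plus P lost": solve Q's
 * equation for the missing D, then recompute P by xor.
 * raid6_2data_recov() handles "two data blocks lost": two equations
 * (P and Q) in two unknowns, solved per byte with lookup tables.
 *
 * The pstripe: path is the plain raid5 case.  It copies P into the failed
 * slot, then rotates that slot to the end of the pointer array so
 * run_xor() xors the surviving data blocks into it.  For example, with
 * faila = 1 and four data stripes, the result is P ^ D0 ^ D2 ^ D3,
 * which by P's definition equals the missing D1.
 */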
/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, -EIO);
	else
		__raid_recover_end_io(rbio);
}
/*
 * reads everything we need off the disk to reconstruct
 * the parity.  endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish an rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(READ, bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, -EIO);
	return -EIO;
}
/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(root->fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * reconstruct from the q stripe if they are
	 * asking for mirror 3
	 */
	if (mirror_num == 3)
		rbio->failb = rbio->real_stripes - 2;

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}
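/*
 * How the mirror_num == 3 trick above works: on raid6 the stripe order
 * is [data 0 .. data n-1, P, Q], so real_stripes - 2 is the P stripe.
 * Marking P as the second failure forces __raid_recover_end_io() down
 * the raid6_datap_recov() path, i.e. the failed data block is rebuilt
 * from Q rather than from P.  (Mirror 2 is the normal rebuild-from-
 * parity attempt; mirror 3 is the last-resort retry using Q.)
 */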
static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}
/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Note: We need to make sure all the pages that are added to the
 * scrub/replace raid bio are correct and are not changed during the
 * scrub/replace.  That is, those pages just hold metadata or file data
 * with checksums.
 */
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	for (i = 0; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}

	/* For now we only support the case where sectorsize equals PAGE_SIZE */
	ASSERT(root->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	return rbio;
}
/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_CACHE_SHIFT;
	rbio->bio_pages[index] = page;
}
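/*
 * Worked example of the index math above (numbers are illustrative):
 * with a 64K stripe_len and 4K pages, stripe_npages is 16.  A page at
 * logical raid_map[0] + 68K gives stripe_offset = 68K and index = 17,
 * which bio_pages[] interprets as page 1 of data stripe 1, since that
 * array is laid out data stripe after data stripe across the full
 * stripe.
 */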
/*
 * We only scrub the parity for the horizontal stripes where we have
 * correct data, so we don't need to allocate pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}
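/*
 * Layout note for the index computation above: stripe_pages[] is
 * stripe-major, so index = stripe * stripe_npages + pagenr.  With
 * stripe_npages = 16, bit 3 of dbitmap touches stripe_pages[3], [19],
 * [35], ... one page per device in that horizontal row, and only the
 * rows marked in dbitmap ever get pages allocated here.
 */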
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * The higher layers (the scrubber) are unlikely to use this area of
	 * the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
		}

		/* Check the scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE);
		else
			/* The parity is right, no need to write it back */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, 0);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		submit_bio(WRITE, bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
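/*
 * Why the compare-then-clear dance above: for each row marked in
 * dbitmap we recompute parity into throwaway pages (p_page/q_page),
 * memcmp() the result against what is on disk, and clear the row's
 * dbitmap bit when they match.  The writeback loops then only touch
 * rows whose parity was actually wrong, so a clean stripe generates
 * zero writes.  In the raid5 branch, copying pointers[0] into
 * pointers[nr_data] and then run_xor()-ing pointers[1..nr_data-1] into
 * it leaves D0 ^ D1 ^ ... ^ Dn-1 in the scratch parity page.
 */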
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}
/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * Because we cannot use the scrubbing parity to repair data,
		 * our repair capability is reduced by one: on RAID6 we can
		 * tolerate one less failure, and on RAID5 we cannot repair
		 * anything at all.
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good and only the parity is bad, just
		 * repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6.  If the corrupted parity is the scrubbing
		 * parity, luckily we can use the other one to repair the
		 * data; otherwise we cannot repair the data stripe.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
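/*
 * The dispatch above, case by case (dfail = failed data stripes,
 * failp = index of a failed parity stripe, if any):
 *
 *   dfail == 0  -> all the data survived; go repair just the parity
 *   dfail == 1 and failp == scrubp (raid6) -> the other parity is
 *       trustworthy, so reconstruct the data first and finish the
 *       scrub afterwards
 *   anything else -> -EIO, because the repair would have to trust
 *       exactly the parity block we suspect is bad
 */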
/*
 * end io for the read phase of the parity scrub cycle.  All the bios here
 * are physical stripe bios we've read from the disk so we can recalculate
 * the parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are
 * read in, but it may trigger parity reconstruction if we had any errors
 * along the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}
static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	bio_list_init(&bio_list);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(READ, bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}
static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}
/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(root, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the rebuild-missing rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	return rbio;
}
static void missing_raid56_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

static void async_missing_raid56(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			missing_raid56_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_missing_raid56(rbio);
}