/*
 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * The following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_recover {
	refcount_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;	/* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];
	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*scrub_dev;
	u64			logic_start;
	u64			logic_end;
	int			nsectors;
	int			stripe_len;
	refcount_t		refs;
	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happened when
	 * reading or checking the data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};

struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t		refs;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);

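/*
 * Bios in flight pin the scrub context: the inc side takes a context
 * reference along with bios_in_flight, the dec side drops the count, wakes
 * waiters on list_wait and puts the reference again.
 */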
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

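/*
 * Wait until a pending pause request is withdrawn, dropping scrub_lock
 * while sleeping. Must be called with scrub_lock held.
 */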
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

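/*
 * scrub_pause_on()/scrub_pause_off() bracket a section in which the scrub
 * counts as paused: _on announces the pause and wakes potential waiters,
 * _off waits for a pending pause request to be withdrawn before going on.
 */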
static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	refcount_inc(&sctx->refs);
	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * The check of @scrubs_running == @scrubs_paused inside wait_event()
	 * is not an atomic operation, which means we may inc/dec
	 * @scrubs_running/@scrubs_paused at any time. Wake up
	 * @scrub_pause_wait as often as we can so that a blocked transaction
	 * commit waits as little as possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

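/* Release all checksums still queued on the context's csum_list */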
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;

		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

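/*
 * Free the scrub context and everything attached to it: the blocks of a
 * partially filled current bio, the preallocated bio pool and any queued
 * checksums.
 */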
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

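/* Drop a context reference; the last put frees the context */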
static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

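/*
 * Allocate and initialize a scrub context for @dev: the pool of
 * SCRUB_BIOS_PER_SCTX scrub bios is preallocated and chained through
 * next_free, and the write context for the replace target is set up when
 * running as part of a device replace.
 */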
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->fs_info;
	int ret;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->fs_info = dev->fs_info;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = fs_info->nodesize;
	sctx->sectorsize = fs_info->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(&sctx->wr_ctx,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

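/*
 * Backref walking callback for data extents: for every (root, inode,
 * offset) that references the errored extent, resolve the file paths and
 * print one warning line per path.
 */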
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
				  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  (unsigned long long)swarn->sector,
				  root, inum, offset,
				  min(isize - offset, (u64)PAGE_SIZE), nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  (unsigned long long)swarn->sector,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

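/*
 * Print a warning for a bad block. For metadata the tree backrefs are
 * walked and the owning tree is reported; for data the extent's inodes are
 * iterated so scrub_print_warning_inode() can report the affected paths.
 */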
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
				"%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

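/*
 * Callback for iterate_inodes_from_logical(): repair one page of a
 * nodatasum extent through the page cache. If a clean uptodate copy is in
 * memory, it is written over the bad sector via repair_io_failure();
 * otherwise the range is marked EXTENT_DAMAGED and the bad mirror is
 * re-read so that the generic read-repair path can fix it.
 */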
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		ret = repair_io_failure(BTRFS_I(inode), offset, PAGE_SIZE,
					fixup->logical, page,
					offset - page_offset(page),
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
					    btrfs_get_extent,
					    fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					    end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					  EXTENT_DAMAGED);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

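/*
 * Worker for repairing a nodatasum extent: joins a transaction and iterates
 * all inodes referencing the bad logical address, letting
 * scrub_fixup_readpage() trigger the regular read-repair path; the
 * corrected/uncorrectable statistics are updated from the result.
 */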
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
					  scrub_fixup_readpage, fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&fs_info->dev_replace.num_uncorrectable_read_errors);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (nodatasum) error at logical %llu on dev %s",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

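/* Reference counting for scrub_recover; the last put also releases the bbio */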
static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that was the cause
	 * that this fixup code is called) another time, page by page this
	 * time in order to know which pages caused I/O errors and which
	 * ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */
	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		WARN_ON(sctx->is_dev_replace);

nodatasum_case:

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COWed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
				scrub_fixup_nodatasum, NULL, NULL);
		btrfs_queue_work(fs_info->scrub_workers,
				 &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
  1005. success = 1;
  1006. for (page_num = 0; page_num < sblock_bad->page_count;
  1007. page_num++) {
  1008. struct scrub_page *page_bad = sblock_bad->pagev[page_num];
  1009. struct scrub_block *sblock_other = NULL;
  1010. /* skip no-io-error page in scrub */
  1011. if (!page_bad->io_error && !sctx->is_dev_replace)
  1012. continue;
  1013. /* try to find no-io-error page in mirrors */
  1014. if (page_bad->io_error) {
  1015. for (mirror_index = 0;
  1016. mirror_index < BTRFS_MAX_MIRRORS &&
  1017. sblocks_for_recheck[mirror_index].page_count > 0;
  1018. mirror_index++) {
  1019. if (!sblocks_for_recheck[mirror_index].
  1020. pagev[page_num]->io_error) {
  1021. sblock_other = sblocks_for_recheck +
  1022. mirror_index;
  1023. break;
  1024. }
  1025. }
  1026. if (!sblock_other)
  1027. success = 0;
  1028. }
  1029. if (sctx->is_dev_replace) {
  1030. /*
  1031. * did not find a mirror to fetch the page
  1032. * from. scrub_write_page_to_dev_replace()
  1033. * handles this case (page->io_error), by
  1034. * filling the block with zeros before
  1035. * submitting the write request
  1036. */
  1037. if (!sblock_other)
  1038. sblock_other = sblock_bad;
  1039. if (scrub_write_page_to_dev_replace(sblock_other,
  1040. page_num) != 0) {
  1041. btrfs_dev_replace_stats_inc(
  1042. &fs_info->dev_replace.num_write_errors);
  1043. success = 0;
  1044. }
  1045. } else if (sblock_other) {
  1046. ret = scrub_repair_page_from_good_copy(sblock_bad,
  1047. sblock_other,
  1048. page_num, 0);
  1049. if (0 == ret)
  1050. page_bad->io_error = 0;
  1051. else
  1052. success = 0;
  1053. }
  1054. }
  1055. if (success && !sctx->is_dev_replace) {
  1056. if (is_metadata || have_csum) {
  1057. /*
  1058. * need to verify the checksum now that all
  1059. * sectors on disk are repaired (the write
  1060. * request for data to be repaired is on its way).
  1061. * Just be lazy and use scrub_recheck_block()
  1062. * which re-reads the data before the checksum
  1063. * is verified, but most likely the data comes out
  1064. * of the page cache.
  1065. */
  1066. scrub_recheck_block(fs_info, sblock_bad, 1);
  1067. if (!sblock_bad->header_error &&
  1068. !sblock_bad->checksum_error &&
  1069. sblock_bad->no_io_error_seen)
  1070. goto corrected_error;
  1071. else
  1072. goto did_not_correct_error;
  1073. } else {
  1074. corrected_error:
  1075. spin_lock(&sctx->stat_lock);
  1076. sctx->stat.corrected_errors++;
  1077. sblock_to_check->data_corrected = 1;
  1078. spin_unlock(&sctx->stat_lock);
  1079. btrfs_err_rl_in_rcu(fs_info,
  1080. "fixed up error at logical %llu on dev %s",
  1081. logical, rcu_str_deref(dev->name));
  1082. }
  1083. } else {
  1084. did_not_correct_error:
  1085. spin_lock(&sctx->stat_lock);
  1086. sctx->stat.uncorrectable_errors++;
  1087. spin_unlock(&sctx->stat_lock);
  1088. btrfs_err_rl_in_rcu(fs_info,
  1089. "unable to fixup (regular) error at logical %llu on dev %s",
  1090. logical, rcu_str_deref(dev->name));
  1091. }
  1092. out:
  1093. if (sblocks_for_recheck) {
  1094. for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
  1095. mirror_index++) {
  1096. struct scrub_block *sblock = sblocks_for_recheck +
  1097. mirror_index;
  1098. struct scrub_recover *recover;
  1099. int page_index;
  1100. for (page_index = 0; page_index < sblock->page_count;
  1101. page_index++) {
  1102. sblock->pagev[page_index]->sblock = NULL;
  1103. recover = sblock->pagev[page_index]->recover;
  1104. if (recover) {
  1105. scrub_put_recover(recover);
  1106. sblock->pagev[page_index]->recover =
  1107. NULL;
  1108. }
  1109. scrub_page_put(sblock->pagev[page_index]);
  1110. }
  1111. }
  1112. kfree(sblocks_for_recheck);
  1113. }
  1114. return 0;
  1115. }
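
/*
 * The number of ways a page can be fetched for the recheck: on RAID5 a
 * page can be read directly or rebuilt from parity (2 "mirrors"), on
 * RAID6 additionally from the second parity stripe (3). For all other
 * profiles, every returned stripe is a plain copy.
 */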
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bbio->num_stripes;
}

static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}
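
/*
 * Set up one scrub_block per mirror for the block that showed errors, so
 * that every page of the errored block can be re-read from each copy (or,
 * on RAID5/6, rebuilt from parity) during the recheck.
 */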
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck)
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = original_sblock->page_count * PAGE_SIZE;
	u64 logical = original_sblock->pagev[0]->logical;
	u64 generation = original_sblock->pagev[0]->generation;
	u64 flags = original_sblock->pagev[0]->flags;
	u64 have_csum = original_sblock->pagev[0]->have_csum;
	struct scrub_recover *recover;
	struct btrfs_bio *bbio;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int page_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	/*
	 * Note: the two members refs and outstanding_pages are not used (and
	 * not set) in the blocks that are used for the recheck procedure.
	 */
	while (length > 0) {
		sublen = min_t(u64, length, PAGE_SIZE);
		mapped_length = sublen;
		bbio = NULL;

		/*
		 * With a length of PAGE_SIZE, each returned stripe represents
		 * one mirror.
		 */
		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				       logical, &mapped_length, &bbio);
		if (ret || !bbio || mapped_length < sublen) {
			btrfs_put_bbio(bbio);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bbio(bbio);
			return -ENOMEM;
		}

		refcount_set(&recover->refs, 1);
		recover->bbio = bbio;
		recover->map_length = mapped_length;

		BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);

		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;

			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(recover);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->sblock = sblock;
			page->flags = flags;
			page->generation = generation;
			page->logical = logical;
			page->have_csum = have_csum;
			if (have_csum)
				memcpy(page->csum,
				       original_sblock->pagev[0]->csum,
				       sctx->csum_size);

			scrub_stripe_index_and_offset(logical,
						      bbio->map_type,
						      bbio->raid_map,
						      mapped_length,
						      bbio->num_stripes -
						      bbio->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			page->physical = bbio->stripes[stripe_index].physical +
					 stripe_offset;
			page->dev = bbio->stripes[stripe_index].dev;

			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;

			scrub_get_recover(recover);
			page->recover = recover;
		}
		scrub_put_recover(recover);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

struct scrub_bio_ret {
	struct completion event;
	int error;
};

static void scrub_bio_wait_endio(struct bio *bio)
{
	struct scrub_bio_ret *ret = bio->bi_private;

	ret->error = bio->bi_error;
	complete(&ret->event);
}

static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
	return page->recover &&
	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}
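
/*
 * Read a page through the RAID5/6 recovery code and wait synchronously
 * for completion; used by the recheck to rebuild the page from the
 * remaining stripes instead of re-reading the failed copy.
 */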
static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_page *page)
{
	struct scrub_bio_ret done;
	int ret;

	init_completion(&done.event);
	done.error = 0;
	bio->bi_iter.bi_sector = page->logical >> 9;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;

	ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
				    page->recover->map_length,
				    page->mirror_num, 0);
	if (ret)
		return ret;

	wait_for_completion(&done.event);
	if (done.error)
		return -EIO;

	return 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror)
{
	int page_num;

	sblock->no_io_error_seen = 1;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
			if (scrub_submit_raid56_bio_wait(fs_info, bio, page)) {
				page->io_error = 1;
				sblock->no_io_error_seen = 0;
			}
		} else {
			bio->bi_iter.bi_sector = page->physical >> 9;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);

			if (btrfsic_submit_bio_wait(bio)) {
				page->io_error = 1;
				sblock->no_io_error_seen = 0;
			}
		}

		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);
}

static inline int scrub_check_fsid(u8 fsid[],
				   struct scrub_page *spage)
{
	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
	return !ret;
}
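
/*
 * Recompute the header and checksum state of a block whose pages have
 * just been re-read; data blocks are verified against their csum, all
 * other blocks are treated as tree blocks.
 */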
static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}
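
/*
 * Overwrite one page of the bad block on disk with the matching page of
 * the good mirror. With force_write set the page is rewritten
 * unconditionally; otherwise only if the block or page actually showed
 * header, checksum or I/O errors.
 */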
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];
	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;

		if (!page_bad->dev->bdev) {
			btrfs_warn_rl(fs_info,
				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_iter.bi_sector = page_bad->physical >> 9;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}

		if (btrfsic_submit_bio_wait(bio)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			btrfs_dev_replace_stats_inc(
				&fs_info->dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
	int page_num;

	/*
	 * This block is used for the check of the parity on the source
	 * device, so the data needn't be written into the destination
	 * device.
	 */
	if (sblock->sparity)
		return;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			btrfs_dev_replace_stats_inc(
				&fs_info->dev_replace.num_write_errors);
	}
}
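
/*
 * Queue one page for writeout to the replacement device. A page that
 * could not be read from any mirror is zero-filled first, so the target
 * never receives stale buffer contents.
 */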
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		memset(mapped_buffer, 0, PAGE_SIZE);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}
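
/*
 * Batch pages into the current write bio as long as they are physically
 * and logically contiguous; the bio is submitted when it is full or when
 * the next page does not continue the run.
 */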
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&wr_ctx->wr_lock);
again:
	if (!wr_ctx->wr_curr_bio) {
		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
					      GFP_KERNEL);
		if (!wr_ctx->wr_curr_bio) {
			mutex_unlock(&wr_ctx->wr_lock);
			return -ENOMEM;
		}
		wr_ctx->wr_curr_bio->sctx = sctx;
		wr_ctx->wr_curr_bio->page_count = 0;
	}
	sbio = wr_ctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = wr_ctx->tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_KERNEL,
					wr_ctx->pages_per_wr_bio);
			if (!bio) {
				mutex_unlock(&wr_ctx->wr_lock);
				return -ENOMEM;
			}
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&wr_ctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == wr_ctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&wr_ctx->wr_lock);

	return 0;
}

static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;

	if (!wr_ctx->wr_curr_bio)
		return;

	sbio = wr_ctx->wr_curr_bio;
	wr_ctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_bdev);
	scrub_pending_bio_inc(sctx);
	/*
	 * Process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver, which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5.
	 */
	btrfsic_submit_bio(sbio->bio);
}

static void scrub_wr_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->err = bio->bi_error;
	sbio->bio = bio;

	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
			scrub_wr_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}

static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->err) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			btrfs_dev_replace_stats_inc(&dev_replace->
						    num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}
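
/*
 * Verify one scrub_block according to the extent flags of its first page
 * and kick off repair via scrub_handle_errored_block() when the check
 * fails.
 */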
static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	/*
	 * No need to initialize these stats currently, because this
	 * function only uses the return value instead of these stats
	 * values.
	 *
	 * Todo:
	 * always use stats
	 */
	sblock->header_error = 0;
	sblock->generation_error = 0;
	sblock->checksum_error = 0;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}
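
/*
 * Compute the data checksum over all pages of the block and compare it
 * against the csum that was looked up in the checksum tree; returns
 * nonzero and flags the block on mismatch.
 */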
static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		sblock->checksum_error = 1;

	return sblock->checksum_error;
}

static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
		sblock->header_error = 1;

	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
		sblock->header_error = 1;
		sblock->generation_error = 1;
	}

	if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
		sblock->header_error = 1;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		sblock->header_error = 1;

	len = sctx->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		sblock->checksum_error = 1;

	return sblock->header_error || sblock->checksum_error;
}

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * If we find an error in a super block, we just report it.
		 * Super blocks get rewritten with the next transaction
		 * commit anyway.
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	refcount_inc(&sblock->refs);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (refcount_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->refs);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->refs)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}

static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);
	btrfsic_submit_bio(sbio->bio);
}
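
/*
 * Add one page to the current read bio, waiting for a free bio slot if
 * necessary; a run of pages is flushed early whenever the next page is
 * not contiguous on disk, not contiguous in the logical address space,
 * or not on the same device.
 */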
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_KERNEL,
					sctx->pages_per_rd_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}

static void scrub_missing_raid56_end_io(struct bio *bio)
{
	struct scrub_block *sblock = bio->bi_private;
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;

	if (bio->bi_error)
		sblock->no_io_error_seen = 0;

	bio_put(bio);

	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
}

static void scrub_missing_raid56_worker(struct btrfs_work *work)
{
	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 logical;
	struct btrfs_device *dev;

	logical = sblock->pagev[0]->logical;
	dev = sblock->pagev[0]->dev;

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);

	if (!sblock->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"IO error rebuilding logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else if (sblock->header_error || sblock->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"failed to rebuild valid logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else {
		scrub_write_block_to_dev_replace(sblock);
	}

	scrub_block_put(sblock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}
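
/*
 * Rebuild the pages of a block that sits on a missing device by handing
 * the block to the RAID5/6 code as a "missing" rbio; verification and
 * writeout to the replacement device then happen in the worker above.
 */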
static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = sblock->page_count * PAGE_SIZE;
	u64 logical = sblock->pagev[0]->logical;
	struct btrfs_bio *bbio = NULL;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	int ret;
	int i;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			       &length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	if (WARN_ON(!sctx->is_dev_replace ||
		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * We shouldn't be scrubbing a missing device. Even for dev
		 * replace, we should only get here for RAID 5/6. We either
		 * managed to mount something with no mirrors remaining or
		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
		 */
		goto bbio_out;
	}

	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		goto bbio_out;

	bio->bi_iter.bi_sector = logical >> 9;
	bio->bi_private = sblock;
	bio->bi_end_io = scrub_missing_raid56_end_io;

	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
	if (!rbio)
		goto rbio_out;

	for (i = 0; i < sblock->page_count; i++) {
		struct scrub_page *spage = sblock->pagev[i];

		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
	}

	btrfs_init_work(&sblock->work, btrfs_scrub_helper,
			scrub_missing_raid56_worker, NULL, NULL);
	scrub_block_get(sblock);
	scrub_pending_bio_inc(sctx);
	raid56_submit_missing_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
}
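
/*
 * Split a [logical, logical + len) range into a scrub_block of page
 * sized scrub_pages and queue each page for reading; blocks on a missing
 * device are instead rebuilt through the RAID5/6 path.
 */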
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/*
	 * one ref inside this function, plus one for each page added to
	 * a bio later on
	 */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	if (dev->missing) {
		/*
		 * This case should only be hit for RAID 5/6 device replace.
		 * See the comment in scrub_missing_raid56_pages() for details.
		 */
		scrub_missing_raid56_pages(sblock);
	} else {
		for (index = 0; index < sblock->page_count; index++) {
			struct scrub_page *spage = sblock->pagev[index];
			int ret;

			ret = scrub_add_page_to_rd_bio(sctx, spage);
			if (ret) {
				scrub_block_put(sblock);
				return ret;
			}
		}

		if (force)
			scrub_submit(sctx);
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->err = bio->bi_error;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}
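
/*
 * Mark the sectors covered by [start, start + len) in the given
 * per-stripe bitmap. The range is first made relative to the parity
 * stripe and may wrap around its end: e.g. with nsectors == 16, an
 * offset of 14 and a 4-sector length sets bits 14, 15, 0 and 1.
 */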
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	u32 offset;
	int nsectors;
	int sectorsize = sparity->sctx->fs_info->sectorsize;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div_u64_rem(start, sparity->stripe_len, &offset);
	offset /= sectorsize;
	nsectors = (int)len / sectorsize;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}

static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * In the dev-replace case, a block with a checksum error is
		 * written to the target via the repair mechanism; an intact
		 * block is written to the target here.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}
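
/*
 * Look up the checksum for the sector at @logical in the pre-loaded
 * csum_list, discarding list entries that lie entirely before it.
 * Returns 1 and copies the csum on success, 0 if no checksum is known.
 */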
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
	num_sectors = sum->len / sctx->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						      physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}

static int scrub_pages_for_parity(struct scrub_parity *sparity,
				  u64 logical, u64 len,
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/*
	 * one ref inside this function, plus one for each page added to
	 * a bio later on
	 */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		/* For scrub block */
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		/* For scrub parity */
		scrub_page_get(spage);
		list_add_tail(&spage->list, &sparity->spages);
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u64 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (dev->missing) {
		scrub_parity_mark_sectors_error(sparity, logical, len);
		return 0;
	}

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sctx->nodesize;
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				goto skip;
		}
		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
					     flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
skip:
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

/*
 * Given a physical address, this will calculate its logical offset.
 * If this is a parity stripe, it will return the left-most data
 * stripe's logical offset.
 *
 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	u32 stripe_index;
	u32 rot;

	last_offset = (physical - map->stripes[num].physical) *
		      nr_data_stripes(map);
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < nr_data_stripes(map); i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = div_u64(*offset, map->stripe_len);
		stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));

		/* Work out the disk rotation on this stripe-set */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}

static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_page *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
		list_del_init(&curr->list);
		scrub_page_put(curr);
	}

	kfree(sparity);
}

static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}

static void scrub_parity_bio_endio(struct bio *bio)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;

	if (bio->bi_error)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	bio_put(bio);

	btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
			scrub_parity_bio_endio_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
}
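
/*
 * Check and, if necessary, rewrite the parity of the stripe: sectors
 * whose data could not be read or repaired (ebitmap) are removed from
 * the set of sectors to verify (dbitmap) before the scrub rbio is
 * submitted.
 */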
static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	struct btrfs_bio *bbio = NULL;
	u64 length;
	int ret;

	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap,
			   sparity->ebitmap, sparity->nsectors))
		goto out;

	length = sparity->logic_end - sparity->logic_start;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
			       &length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		goto bbio_out;

	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
					      length, sparity->scrub_dev,
					      sparity->dbitmap,
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}

static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
}

static void scrub_parity_get(struct scrub_parity *sparity)
{
	refcount_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!refcount_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}
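
/*
 * Scrub one full parity stripe of a RAID5/6 chunk: walk all extents in
 * [logic_start, logic_end), mark their sectors in dbitmap and read them;
 * the parity itself is checked and repaired when the last reference to
 * the scrub_parity is dropped.
 */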
static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  struct btrfs_path *path,
						  u64 logic_start,
						  u64 logic_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct btrfs_bio *bbio = NULL;
	u64 flags;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 generation;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 mapped_length;
	struct btrfs_device *extent_dev;
	struct scrub_parity *sparity;
	int nsectors;
	int bitmap_len;
	int extent_mirror_num;
	int stop_loop = 0;

	nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
			  GFP_NOFS);
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
	refcount_set(&sparity->refs, 1);
	INIT_LIST_HEAD(&sparity->spages);
	sparity->dbitmap = sparity->bitmap;
	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;

	ret = 0;
	while (logic_start < logic_end) {
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logic_start;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logic_start)
				goto next;

			if (key.objectid >= logic_end) {
				stop_loop = 1;
				break;
			}

			while (key.objectid >= logic_start + map->stripe_len)
				logic_start += map->stripe_len;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logic_start ||
			     key.objectid + bytes >
			     logic_start + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logic_start);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			if (extent_logical < logic_start) {
				extent_len -= logic_start - extent_logical;
				extent_logical = logic_start;
			}

			if (extent_logical + extent_len >
			    logic_start + map->stripe_len)
				extent_len = logic_start + map->stripe_len -
					     extent_logical;

			scrub_parity_mark_sectors_data(sparity, extent_logical,
						       extent_len);

			mapped_length = extent_len;
			bbio = NULL;
			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
					extent_logical, &mapped_length, &bbio,
					0);
			if (!ret) {
				if (!bbio || mapped_length < extent_len)
					ret = -EIO;
			}
			if (ret) {
				btrfs_put_bbio(bbio);
				goto out;
			}
			extent_physical = bbio->stripes[0].physical;
			extent_mirror_num = bbio->mirror_num;
			extent_dev = bbio->stripes[0].dev;
			btrfs_put_bbio(bbio);

			ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent_for_parity(sparity, extent_logical,
						      extent_len,
						      extent_physical,
						      extent_dev, flags,
						      generation,
						      extent_mirror_num);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logic_start += map->stripe_len;

				if (logic_start >= logic_end) {
					stop_loop = 1;
					break;
				}

				if (logic_start < key.objectid + bytes) {
					cond_resched();
					goto again;
				}
			}
next:
			path->slots[0]++;
		}

		btrfs_release_path(path);

		if (stop_loop)
			break;

		logic_start += map->stripe_len;
	}
out:
	if (ret < 0)
		scrub_parity_mark_sectors_error(sparity, logic_start,
						logic_end - logic_start);
	scrub_parity_put(sparity);
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	btrfs_release_path(path);
	return ret < 0 ? ret : 0;
}
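
/*
 * Scrub one stripe of one device of a chunk: walk the extent tree within
 * [base + offset, base + offset + length), issuing reads stripe by
 * stripe according to the RAID layout of the chunk.
 */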
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   int is_dev_replace)
{
	struct btrfs_path *path, *ppath;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 physical_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 stripe_logical;
	u64 stripe_end;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;

	physical = map->stripes[num].physical;
	offset = 0;
	nstripes = div_u64(length, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical, num, map, &offset, NULL);
		increment = map->stripe_len * nr_data_stripes(map);
		mirror_num = 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}
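
	/*
	 * Worked example (illustrative only): for a RAID10 map with
	 * num_stripes = 4 and sub_stripes = 2, factor is 2, so one full
	 * logical rotation covers two stripe lengths.  Scrubbing stripe
	 * num = 3 then starts at offset = stripe_len * (3 / 2) =
	 * stripe_len, advances by increment = stripe_len * 2 per
	 * iteration, and reads mirror mirror_num = 3 % 2 + 1 = 2,
	 * i.e. the second copy.
	 */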
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ppath = btrfs_alloc_path();
	if (!ppath) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means, it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	ppath->search_commit_root = 1;
	ppath->skip_locking = 1;

	/*
	 * trigger the readahead for the extent tree and csum tree and wait
	 * for completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical_end, num,
					map, &logic_end, NULL);
		logic_end += base;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	/* FIXME it might be better to start readahead at commit root */
	key.objectid = logical;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)0;
	key_end.objectid = logic_end;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key, &key_end);

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = logic_end;
	reada2 = btrfs_reada_add(csum_root, &key, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);
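
	/*
	 * Note that readahead is only an optimization here: if
	 * btrfs_reada_add() returned an error it is simply ignored, and the
	 * scrub pays the cost of cold tree reads instead.
	 */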
	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	ret = 0;
	while (physical < physical_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_ctx.wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_ctx.wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
			scrub_blocked_if_needed(fs_info);
		}

		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = get_raid56_logic_offset(physical, num, map,
						      &logical,
						      &stripe_logical);
			logical += base;
			if (ret) {
				/* it is a parity stripe */
				stripe_logical += base;
				stripe_end = stripe_logical + increment;
				ret = scrub_raid56_parity(sctx, map, scrub_dev,
							  ppath, stripe_logical,
							  stripe_end);
				if (ret)
					goto out;
				goto skip;
			}
		}

		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logical;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}
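
		/*
		 * The path now points at the extent item closest below
		 * @logical (or at the first item above it if none exists),
		 * so the walk below may start one item early; the
		 * "key.objectid + bytes <= logical" check skips extents
		 * that end before this stripe.
		 */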
		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logical ||
			     key.objectid + bytes >
			     logical + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logical);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}

again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}
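
			/*
			 * Worked example (illustrative only): with
			 * stripe_len = 64K, logical = 128K and an extent
			 * covering 96K..224K, the head is clipped so that
			 * extent_logical = 128K and the tail so that
			 * extent_len = 64K; the remainder past 192K is
			 * picked up by the "goto again" path further down
			 * once @logical has advanced.
			 */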
			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root,
						       extent_logical,
						       extent_logical +
						       extent_len - 1,
						       &sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
loop:
					physical += map->stripe_len;
					ret = get_raid56_logic_offset(physical,
							num, map, &logical,
							&stripe_logical);
					logical += base;

					if (ret && physical < physical_end) {
						stripe_logical += base;
						stripe_end = stripe_logical +
								increment;
						ret = scrub_raid56_parity(sctx,
							map, scrub_dev, ppath,
							stripe_logical,
							stripe_end);
						if (ret)
							goto out;
						goto loop;
					}
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	btrfs_free_path(ppath);
	return ret < 0 ? ret : 0;
}
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset,
					  struct btrfs_block_group_cache *cache,
					  int is_dev_replace)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em) {
		/*
		 * Might have been an unused block group deleted by the cleaner
		 * kthread or relocation.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed)
			ret = -EINVAL;
		spin_unlock(&cache->lock);

		return ret;
	}

	map = em->map_lookup;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;
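
	/*
	 * Most profiles place at most one stripe of a chunk on a given
	 * device, but DUP places two on the same device; matching on both
	 * the device and @dev_offset below scrubs exactly the stripe that
	 * this dev extent describes.
	 */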
	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length,
					   is_dev_replace);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);
	return ret;
}
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 length;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/* some chunks are removed but not committed to disk yet,
		 * continue scrubbing */
		if (!cache)
			goto skip;

		/*
		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid a deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);
		ret = btrfs_inc_block_group_ro(fs_info, cache);
		if (!ret && is_dev_replace) {
			/*
			 * If we are doing a device replace wait for any tasks
			 * that started delalloc right before we set the block
			 * group to RO mode, as they might have just allocated
			 * an extent from it or decided they could do a nocow
			 * write. And if any such tasks did that, wait for their
			 * ordered extents to complete and then commit the
			 * current transaction, so that we can later see the new
			 * extent items in the extent tree - the ordered extents
			 * create delayed data references (for cow writes) when
			 * they complete, which will be run and insert the
			 * corresponding extent items into the extent tree when
			 * we commit the transaction they used when running
			 * inode.c:btrfs_finish_ordered_io(). We later use
			 * the commit root of the extent tree to find extents
			 * to copy from the srcdev into the tgtdev, and we don't
			 * want to miss any new extents.
			 */
			btrfs_wait_block_group_reservations(cache);
			btrfs_wait_nocow_writers(cache);
			ret = btrfs_wait_ordered_roots(fs_info, -1,
						       cache->key.objectid,
						       cache->key.offset);
			if (ret > 0) {
				struct btrfs_trans_handle *trans;

				trans = btrfs_join_transaction(root);
				if (IS_ERR(trans))
					ret = PTR_ERR(trans);
				else
					ret = btrfs_commit_transaction(trans);
				if (ret) {
					scrub_pause_off(fs_info);
					btrfs_put_block_group(cache);
					break;
				}
			}
		}
		scrub_pause_off(fs_info);

		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC) {
			/*
			 * btrfs_inc_block_group_ro returns -ENOSPC when it
			 * fails to create a new chunk for metadata.
			 * That is not a problem for scrub/replace, because
			 * metadata is always cowed, and our scrub paused
			 * commit_transactions.
			 */
			ro_set = 0;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro, ret=%d",
				   ret);
			btrfs_put_block_group(cache);
			break;
		}

		btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
				  found_key.offset, cache, is_dev_replace);

		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * must be called before we decrease @scrub_paused.
		 * make sure we don't block transaction commit while
		 * we are waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);

		scrub_pause_off(fs_info);

		btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
		    btrfs_block_group_used(&cache->item) == 0) {
			spin_unlock(&cache->lock);
			spin_lock(&fs_info->unused_bgs_lock);
			if (list_empty(&cache->bg_list)) {
				btrfs_get_block_group(cache);
				list_add_tail(&cache->bg_list,
					      &fs_info->unused_bgs);
			}
			spin_unlock(&fs_info->unused_bgs_lock);
		} else {
			spin_unlock(&cache->lock);
		}

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}

static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EIO;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;
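
	/*
	 * The superblock copies live at the fixed offsets returned by
	 * btrfs_sb_offset() (64KiB, 64MiB and 256GiB); copies that would
	 * fall beyond the end of the device are skipped below.
	 */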
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}
/*
 * get a reference count on fs_info->scrub_workers. start the workers if
 * necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;
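
	/*
	 * Callers are expected to hold fs_info->scrub_lock, which serializes
	 * the refcount below against scrub_workers_put().
	 */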
	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			fs_info->scrub_workers =
				btrfs_alloc_workqueue(fs_info, "scrub", flags,
						      1, 4);
		else
			fs_info->scrub_workers =
				btrfs_alloc_workqueue(fs_info, "scrub", flags,
						      max_active, 4);
		if (!fs_info->scrub_workers)
			goto fail_scrub_workers;

		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers)
			goto fail_scrub_wr_completion_workers;

		fs_info->scrub_nocow_workers =
			btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
		if (!fs_info->scrub_nocow_workers)
			goto fail_scrub_nocow_workers;
		fs_info->scrub_parity_workers =
			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					      max_active, 2);
		if (!fs_info->scrub_parity_workers)
			goto fail_scrub_parity_workers;
	}
	++fs_info->scrub_workers_refcnt;
	return 0;

fail_scrub_parity_workers:
	btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
fail_scrub_nocow_workers:
	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(fs_info->scrub_workers);
fail_scrub_workers:
	return -ENOMEM;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	struct rcu_string *name;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
			  fs_info->nodesize,
			  BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err_rl(fs_info,
			     "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
			     fs_info->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
			  fs_info->nodesize,
			  SCRUB_MAX_PAGES_PER_BLOCK,
			  fs_info->sectorsize,
			  SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (dev->missing && !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	if (!is_dev_replace && !readonly && !dev->writeable) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		rcu_read_lock();
		name = rcu_dereference(dev->name);
		btrfs_err(fs_info, "scrub: device %s is not writable",
			  name->str);
		rcu_read_unlock();
		return -EROFS;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	if (dev->scrub_device ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * by checking @scrub_pause_req here, we can avoid
	 * races between committing a transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	if (!is_dev_replace) {
		/*
		 * by holding device list mutex, we can
		 * kick off writing super in log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_put_ctx(sctx);

	return ret;
}

void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
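
/*
 * For dev-replace, reads of an extent are redirected to the first stripe
 * that btrfs_map_block() returns for it; this helper rewrites the physical
 * location, device and mirror number accordingly.  Mapping failures are
 * deliberately ignored, leaving the caller's defaults (read from the
 * source device) untouched.
 */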
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}

static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);
	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}

static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}
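
/*
 * NOCOW extents carry no checksums, so dev-replace cannot verify them via
 * the usual scrub path.  Instead, copy_nocow_pages() queues a worker that
 * re-reads the data through the page cache of each inode referencing the
 * extent and writes it straight to the target device.
 */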
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
			copy_nocow_pages_worker, NULL, NULL);
	INIT_LIST_HEAD(&nocow_ctx->inodes);
	btrfs_queue_work(fs_info->scrub_nocow_workers,
			 &nocow_ctx->work);

	return 0;
}

static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct scrub_nocow_inode *nocow_inode;

	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
	if (!nocow_inode)
		return -ENOMEM;
	nocow_inode->inum = inum;
	nocow_inode->offset = offset;
	nocow_inode->root = root;
	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
	return 0;
}

#define COPY_COMPLETE 1

static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int not_written = 0;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  record_inode_for_nocow, nocow_ctx);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_warn(fs_info,
			   "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
			   logical, physical_for_dev_replace, len, mirror_num,
			   ret);
		not_written = 1;
		goto out;
	}

	btrfs_end_transaction(trans);
	trans = NULL;
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;

		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
						 entry->root, nocow_ctx);
		kfree(entry);
		if (ret == COPY_COMPLETE) {
			ret = 0;
			break;
		} else if (ret) {
			break;
		}
	}
out:
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;

		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}

static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
				 u64 logical)
{
	struct extent_state *cached_state = NULL;
	struct btrfs_ordered_extent *ordered;
	struct extent_io_tree *io_tree;
	struct extent_map *em;
	u64 lockstart = start, lockend = start + len - 1;
	int ret = 0;

	io_tree = &inode->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		ret = 1;
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > logical ||
	    em->block_start + em->block_len < logical + len) {
		free_extent_map(em);
		ret = 1;
		goto out_unlock;
	}
	free_extent_map(em);

out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
			     GFP_NOFS);
	return ret;
}
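
/*
 * check_extent_to_block() returns 1 (skip this inode) when an ordered
 * extent is pending or the file extent no longer maps to @logical, 0 when
 * the copy may proceed, and a negative errno on lookup failure; the caller
 * below folds the "skip" case into success.
 */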
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 nocow_ctx_logical;
	u64 len = nocow_ctx->len;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole.. */
	inode_lock(inode);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;
	nocow_ctx_logical = nocow_ctx->logical;

	ret = check_extent_to_block(BTRFS_I(inode), offset, len,
				    nocow_ctx_logical);
	if (ret) {
		ret = ret > 0 ? 0 : ret;
		goto out;
	}

	while (len >= PAGE_SIZE) {
		index = offset >> PAGE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			btrfs_err(fs_info, "find_or_create_page() failed");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page(io_tree, page,
						    btrfs_get_extent,
						    nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page cache,
			 * the data on it is meaningless: it may be stale,
			 * and the new data may have been written into a new
			 * page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				put_page(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}

		ret = check_extent_to_block(BTRFS_I(inode), offset, len,
					    nocow_ctx_logical);
		if (ret) {
			ret = ret > 0 ? 0 : ret;
			goto next_page;
		}

		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		put_page(page);

		if (ret)
			break;

		offset += PAGE_SIZE;
		physical_for_dev_replace += PAGE_SIZE;
		nocow_ctx_logical += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	if (!ret)
		ret = COPY_COMPLETE;
out:
	inode_unlock(inode);
	iput(inode);
	return ret;
}
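
/*
 * Write a single page synchronously to the replace target device,
 * bypassing the block group/COW machinery.  bi_sector is expressed in
 * 512-byte units, hence the ">> 9" below.
 */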
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		btrfs_warn_rl(dev->fs_info,
			      "scrub write_page_nocow(bdev == NULL) is unexpected");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
	ret = bio_add_page(bio, page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	if (btrfsic_submit_bio_wait(bio))
		goto leave_with_eio;

	bio_put(bio);
	return 0;
}