xfs_inode.c

/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_attr_sf.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
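
/*
 * Zone (slab cache) from which incore struct xfs_inode objects are
 * allocated.
 */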
kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define XFS_ITRUNC_MAX_EXTENTS  2

STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);

/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
        struct xfs_inode        *ip)
{
        if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
                return ip->i_d.di_extsize;
        if (XFS_IS_REALTIME_INODE(ip))
                return ip->i_mount->m_sb.sb_rextsize;
        return 0;
}

/*
 * Helper function to extract CoW extent size hint from inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
        struct xfs_inode        *ip)
{
        xfs_extlen_t            a, b;

        a = 0;
        if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
                a = ip->i_d.di_cowextsize;
        b = xfs_get_extsz_hint(ip);

        a = max(a, b);
        if (a == 0)
                return XFS_DEFAULT_COWEXTSZ_HINT;
        return a;
}

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
        struct xfs_inode        *ip)
{
        uint                    lock_mode = XFS_ILOCK_SHARED;

        if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
            (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
                lock_mode = XFS_ILOCK_EXCL;
        xfs_ilock(ip, lock_mode);
        return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
        struct xfs_inode        *ip)
{
        uint                    lock_mode = XFS_ILOCK_SHARED;

        if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
            (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
                lock_mode = XFS_ILOCK_EXCL;
        xfs_ilock(ip, lock_mode);
        return lock_mode;
}

/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
 *
 * mmap_sem locking order:
 *
 * i_rwsem -> page lock -> mmap_sem
 * mmap_sem -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_sem locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO.  These IO paths
 * can fault in pages during copy in/out (for buffered IO) or require the
 * mmap_sem in get_user_pages() to map the user pages into the kernel address
 * space for direct IO.  Similarly the i_rwsem cannot be taken inside a page
 * fault because page faults already hold the mmap_sem.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the i_mmap_lock.  These locks should *only* be
 * both taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
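
/*
 * Illustrative sketch of the ordering rules above (not a verbatim caller):
 * code that must invalidate the page cache in a race-free way takes all
 * three locks in the documented order, e.g.
 *
 *      xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL);
 *      ... do the work ...
 *      xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL);
 *
 * xfs_ilock() below honours this order internally (i_rwsem, then
 * i_mmaplock, then i_lock), whether the flags arrive in one call or in
 * separate calls.
 */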

void
xfs_ilock(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        trace_xfs_ilock(ip, lock_flags, _RET_IP_);

        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
               (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

        if (lock_flags & XFS_IOLOCK_EXCL) {
                down_write_nested(&VFS_I(ip)->i_rwsem,
                                  XFS_IOLOCK_DEP(lock_flags));
        } else if (lock_flags & XFS_IOLOCK_SHARED) {
                down_read_nested(&VFS_I(ip)->i_rwsem,
                                 XFS_IOLOCK_DEP(lock_flags));
        }

        if (lock_flags & XFS_MMAPLOCK_EXCL)
                mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
        else if (lock_flags & XFS_MMAPLOCK_SHARED)
                mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

        if (lock_flags & XFS_ILOCK_EXCL)
                mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
        else if (lock_flags & XFS_ILOCK_SHARED)
                mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be locked.
 *      See the comment for xfs_ilock() for a list of valid values.
 */
int
xfs_ilock_nowait(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
               (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

        if (lock_flags & XFS_IOLOCK_EXCL) {
                if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
                        goto out;
        } else if (lock_flags & XFS_IOLOCK_SHARED) {
                if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
                        goto out;
        }

        if (lock_flags & XFS_MMAPLOCK_EXCL) {
                if (!mrtryupdate(&ip->i_mmaplock))
                        goto out_undo_iolock;
        } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
                if (!mrtryaccess(&ip->i_mmaplock))
                        goto out_undo_iolock;
        }

        if (lock_flags & XFS_ILOCK_EXCL) {
                if (!mrtryupdate(&ip->i_lock))
                        goto out_undo_mmaplock;
        } else if (lock_flags & XFS_ILOCK_SHARED) {
                if (!mrtryaccess(&ip->i_lock))
                        goto out_undo_mmaplock;
        }
        return 1;

out_undo_mmaplock:
        if (lock_flags & XFS_MMAPLOCK_EXCL)
                mrunlock_excl(&ip->i_mmaplock);
        else if (lock_flags & XFS_MMAPLOCK_SHARED)
                mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
        if (lock_flags & XFS_IOLOCK_EXCL)
                up_write(&VFS_I(ip)->i_rwsem);
        else if (lock_flags & XFS_IOLOCK_SHARED)
                up_read(&VFS_I(ip)->i_rwsem);
out:
        return 0;
}

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be unlocked.
 *      See the comment for xfs_ilock() for a list of valid values
 *      for this parameter.
 */
void
xfs_iunlock(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
               (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
        ASSERT(lock_flags != 0);

        if (lock_flags & XFS_IOLOCK_EXCL)
                up_write(&VFS_I(ip)->i_rwsem);
        else if (lock_flags & XFS_IOLOCK_SHARED)
                up_read(&VFS_I(ip)->i_rwsem);

        if (lock_flags & XFS_MMAPLOCK_EXCL)
                mrunlock_excl(&ip->i_mmaplock);
        else if (lock_flags & XFS_MMAPLOCK_SHARED)
                mrunlock_shared(&ip->i_mmaplock);

        if (lock_flags & XFS_ILOCK_EXCL)
                mrunlock_excl(&ip->i_lock);
        else if (lock_flags & XFS_ILOCK_SHARED)
                mrunlock_shared(&ip->i_lock);

        trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
        ASSERT((lock_flags &
                ~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

        if (lock_flags & XFS_ILOCK_EXCL)
                mrdemote(&ip->i_lock);
        if (lock_flags & XFS_MMAPLOCK_EXCL)
                mrdemote(&ip->i_mmaplock);
        if (lock_flags & XFS_IOLOCK_EXCL)
                downgrade_write(&VFS_I(ip)->i_rwsem);

        trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
                if (!(lock_flags & XFS_ILOCK_SHARED))
                        return !!ip->i_lock.mr_writer;
                return rwsem_is_locked(&ip->i_lock.mr_lock);
        }

        if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
                if (!(lock_flags & XFS_MMAPLOCK_SHARED))
                        return !!ip->i_mmaplock.mr_writer;
                return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
        }

        if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
                if (!(lock_flags & XFS_IOLOCK_SHARED))
                        return !debug_locks ||
                                lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
                return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
        }

        ASSERT(0);
        return 0;
}
#endif

#ifdef DEBUG
int xfs_locked_n;
int xfs_small_retries;
int xfs_middle_retries;
int xfs_lots_retries;
int xfs_lock_delays;
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set.  And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set.  Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
        int subclass)
{
        return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)       (true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value.  This can be called for any type of inode lock combination, including
 * parent locking.  Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
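/*
 * For example (illustrative only): xfs_lock_inumorder(XFS_ILOCK_EXCL, 2)
 * ORs (2 << XFS_ILOCK_SHIFT) into the returned lock mode, so lockdep sees
 * the third inode's ilock as a distinct subclass and does not flag the
 * ordered nested acquisition as a deadlock.
 */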
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
        int     class = 0;

        ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
                              XFS_ILOCK_RTSUM)));
        ASSERT(xfs_lockdep_subclass_ok(subclass));

        if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
                ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
                class += subclass << XFS_IOLOCK_SHIFT;
        }

        if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
                ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
                class += subclass << XFS_MMAPLOCK_SHIFT;
        }

        if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
                ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
                class += subclass << XFS_ILOCK_SHIFT;
        }

        return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}

/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate).  This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time.  If
 * we lock more than one at a time, lockdep will report false positives saying
 * we have violated locking orders.
 */
static void
xfs_lock_inodes(
        xfs_inode_t     **ips,
        int             inodes,
        uint            lock_mode)
{
        int             attempts = 0, i, j, try_lock;
        xfs_log_item_t  *lp;

        /*
         * Currently supports between 2 and 5 inodes with exclusive locking.  We
         * support an arbitrary depth of locking here, but absolute limits on
         * inodes depend on the type of locking and the limits placed by
         * lockdep annotations in xfs_lock_inumorder.  These are all checked by
         * the asserts.
         */
        ASSERT(ips && inodes >= 2 && inodes <= 5);
        ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
                            XFS_ILOCK_EXCL));
        ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
                              XFS_ILOCK_SHARED)));
        ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
                inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
        ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
                inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

        if (lock_mode & XFS_IOLOCK_EXCL) {
                ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
        } else if (lock_mode & XFS_MMAPLOCK_EXCL)
                ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

        try_lock = 0;
        i = 0;
again:
        for (; i < inodes; i++) {
                ASSERT(ips[i]);

                if (i && (ips[i] == ips[i - 1]))        /* Already locked */
                        continue;

                /*
                 * If try_lock is not set yet, make sure all locked inodes are
                 * not in the AIL.  If any are, set try_lock to be used later.
                 */
                if (!try_lock) {
                        for (j = (i - 1); j >= 0 && !try_lock; j--) {
                                lp = (xfs_log_item_t *)ips[j]->i_itemp;
                                if (lp && (lp->li_flags & XFS_LI_IN_AIL))
                                        try_lock++;
                        }
                }

                /*
                 * If any of the previous locks we have locked is in the AIL,
                 * we must TRY to get the second and subsequent locks.  If
                 * we can't get any, we must release all we have
                 * and try again.
                 */
                if (!try_lock) {
                        xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
                        continue;
                }

                /* try_lock means we have an inode locked that is in the AIL. */
                ASSERT(i != 0);
                if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
                        continue;

                /*
                 * Unlock all previous guys and try again.  xfs_iunlock will try
                 * to push the tail if the inode is in the AIL.
                 */
                attempts++;
                for (j = i - 1; j >= 0; j--) {
                        /*
                         * Check to see if we've already unlocked this one.  Not
                         * the first one going back, and the inode ptr is the
                         * same.
                         */
                        if (j != (i - 1) && ips[j] == ips[j + 1])
                                continue;

                        xfs_iunlock(ips[j], lock_mode);
                }

                if ((attempts % 5) == 0) {
                        delay(1); /* Don't just spin the CPU */
#ifdef DEBUG
                        xfs_lock_delays++;
#endif
                }
                i = 0;
                try_lock = 0;
                goto again;
        }

#ifdef DEBUG
        if (attempts) {
                if (attempts < 5)
                        xfs_small_retries++;
                else if (attempts < 100)
                        xfs_middle_retries++;
                else
                        xfs_lots_retries++;
        } else {
                xfs_locked_n++;
        }
#endif
}

/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time.  If
 * we lock more than one at a time, lockdep will report false positives saying
 * we have violated locking orders.
 */
void
xfs_lock_two_inodes(
        xfs_inode_t             *ip0,
        xfs_inode_t             *ip1,
        uint                    lock_mode)
{
        xfs_inode_t             *temp;
        int                     attempts = 0;
        xfs_log_item_t          *lp;

        ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
        if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
                ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

        ASSERT(ip0->i_ino != ip1->i_ino);

        if (ip0->i_ino > ip1->i_ino) {
                temp = ip0;
                ip0 = ip1;
                ip1 = temp;
        }

again:
        xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));

        /*
         * If the first lock we have locked is in the AIL, we must TRY to get
         * the second lock.  If we can't get it, we must release the first one
         * and try again.
         */
        lp = (xfs_log_item_t *)ip0->i_itemp;
        if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
                if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
                        xfs_iunlock(ip0, lock_mode);
                        if ((++attempts % 5) == 0)
                                delay(1); /* Don't just spin the CPU */
                        goto again;
                }
        } else {
                xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
        }
}
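
/*
 * Sleep until the inode flush lock (the __XFS_IFLOCK_BIT in i_flags) can be
 * taken.  This is the slow path behind xfs_iflock() for when the trylock
 * fails.
 */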
void
__xfs_iflock(
        struct xfs_inode        *ip)
{
        wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
        DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

        do {
                prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
                if (xfs_isiflocked(ip))
                        io_schedule();
        } while (!xfs_iflock_nowait(ip));

        finish_wait(wq, &wait.wait);
}
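
/*
 * Convert the on-disk inode flags (di_flags/di_flags2) into the FS_XFLAG_*
 * values reported to userspace.
 */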
STATIC uint
_xfs_dic2xflags(
        __uint16_t              di_flags,
        uint64_t                di_flags2,
        bool                    has_attr)
{
        uint                    flags = 0;

        if (di_flags & XFS_DIFLAG_ANY) {
                if (di_flags & XFS_DIFLAG_REALTIME)
                        flags |= FS_XFLAG_REALTIME;
                if (di_flags & XFS_DIFLAG_PREALLOC)
                        flags |= FS_XFLAG_PREALLOC;
                if (di_flags & XFS_DIFLAG_IMMUTABLE)
                        flags |= FS_XFLAG_IMMUTABLE;
                if (di_flags & XFS_DIFLAG_APPEND)
                        flags |= FS_XFLAG_APPEND;
                if (di_flags & XFS_DIFLAG_SYNC)
                        flags |= FS_XFLAG_SYNC;
                if (di_flags & XFS_DIFLAG_NOATIME)
                        flags |= FS_XFLAG_NOATIME;
                if (di_flags & XFS_DIFLAG_NODUMP)
                        flags |= FS_XFLAG_NODUMP;
                if (di_flags & XFS_DIFLAG_RTINHERIT)
                        flags |= FS_XFLAG_RTINHERIT;
                if (di_flags & XFS_DIFLAG_PROJINHERIT)
                        flags |= FS_XFLAG_PROJINHERIT;
                if (di_flags & XFS_DIFLAG_NOSYMLINKS)
                        flags |= FS_XFLAG_NOSYMLINKS;
                if (di_flags & XFS_DIFLAG_EXTSIZE)
                        flags |= FS_XFLAG_EXTSIZE;
                if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
                        flags |= FS_XFLAG_EXTSZINHERIT;
                if (di_flags & XFS_DIFLAG_NODEFRAG)
                        flags |= FS_XFLAG_NODEFRAG;
                if (di_flags & XFS_DIFLAG_FILESTREAM)
                        flags |= FS_XFLAG_FILESTREAM;
        }

        if (di_flags2 & XFS_DIFLAG2_ANY) {
                if (di_flags2 & XFS_DIFLAG2_DAX)
                        flags |= FS_XFLAG_DAX;
                if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
                        flags |= FS_XFLAG_COWEXTSIZE;
        }

        if (has_attr)
                flags |= FS_XFLAG_HASATTR;

        return flags;
}

uint
xfs_ip2xflags(
        struct xfs_inode        *ip)
{
        struct xfs_icdinode     *dic = &ip->i_d;

        return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
}

/*
 * Looks up an inode from "name".  If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match.  If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
        xfs_inode_t             *dp,
        struct xfs_name         *name,
        xfs_inode_t             **ipp,
        struct xfs_name         *ci_name)
{
        xfs_ino_t               inum;
        int                     error;

        trace_xfs_lookup(dp, name);

        if (XFS_FORCED_SHUTDOWN(dp->i_mount))
                return -EIO;

        error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
        if (error)
                goto out_unlock;

        error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
        if (error)
                goto out_free_name;

        return 0;

out_free_name:
        if (ci_name)
                kmem_free(ci_name->name);
out_unlock:
        *ipp = NULL;
        return error;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode.  If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation.  Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
static int
xfs_ialloc(
        xfs_trans_t     *tp,
        xfs_inode_t     *pip,
        umode_t         mode,
        xfs_nlink_t     nlink,
        xfs_dev_t       rdev,
        prid_t          prid,
        int             okalloc,
        xfs_buf_t       **ialloc_context,
        xfs_inode_t     **ipp)
{
        struct xfs_mount *mp = tp->t_mountp;
        xfs_ino_t       ino;
        xfs_inode_t     *ip;
        uint            flags;
        int             error;
        struct timespec tv;
        struct inode    *inode;

        /*
         * Call the space management code to pick
         * the on-disk inode to be allocated.
         */
        error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
                            ialloc_context, &ino);
        if (error)
                return error;
        if (*ialloc_context || ino == NULLFSINO) {
                *ipp = NULL;
                return 0;
        }
        ASSERT(*ialloc_context == NULL);

        /*
         * Get the in-core inode with the lock held exclusively.
         * This is because we're setting fields here we need
         * to prevent others from looking at until we're done.
         */
        error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
                         XFS_ILOCK_EXCL, &ip);
        if (error)
                return error;
        ASSERT(ip != NULL);
        inode = VFS_I(ip);

        /*
         * We always convert v1 inodes to v2 now - we only support filesystems
         * with >= v2 inode capability, so there is no reason for ever leaving
         * an inode in v1 format.
         */
        if (ip->i_d.di_version == 1)
                ip->i_d.di_version = 2;

        inode->i_mode = mode;
        set_nlink(inode, nlink);
        ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
        ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
        xfs_set_projid(ip, prid);

        if (pip && XFS_INHERIT_GID(pip)) {
                ip->i_d.di_gid = pip->i_d.di_gid;
                if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
                        inode->i_mode |= S_ISGID;
        }

        /*
         * If the group ID of the new file does not match the effective group
         * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
         * (and only if the irix_sgid_inherit compatibility variable is set).
         */
        if ((irix_sgid_inherit) &&
            (inode->i_mode & S_ISGID) &&
            (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
                inode->i_mode &= ~S_ISGID;

        ip->i_d.di_size = 0;
        ip->i_d.di_nextents = 0;
        ASSERT(ip->i_d.di_nblocks == 0);

        tv = current_time(inode);
        inode->i_mtime = tv;
        inode->i_atime = tv;
        inode->i_ctime = tv;

        ip->i_d.di_extsize = 0;
        ip->i_d.di_dmevmask = 0;
        ip->i_d.di_dmstate = 0;
        ip->i_d.di_flags = 0;

        if (ip->i_d.di_version == 3) {
                inode->i_version = 1;
                ip->i_d.di_flags2 = 0;
                ip->i_d.di_cowextsize = 0;
                ip->i_d.di_crtime.t_sec = (__int32_t)tv.tv_sec;
                ip->i_d.di_crtime.t_nsec = (__int32_t)tv.tv_nsec;
        }

        flags = XFS_ILOG_CORE;
        switch (mode & S_IFMT) {
        case S_IFIFO:
        case S_IFCHR:
        case S_IFBLK:
        case S_IFSOCK:
                ip->i_d.di_format = XFS_DINODE_FMT_DEV;
                ip->i_df.if_u2.if_rdev = rdev;
                ip->i_df.if_flags = 0;
                flags |= XFS_ILOG_DEV;
                break;
        case S_IFREG:
        case S_IFDIR:
                if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
                        uint64_t        di_flags2 = 0;
                        uint            di_flags = 0;

                        if (S_ISDIR(mode)) {
                                if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
                                        di_flags |= XFS_DIFLAG_RTINHERIT;
                                if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
                                        di_flags |= XFS_DIFLAG_EXTSZINHERIT;
                                        ip->i_d.di_extsize = pip->i_d.di_extsize;
                                }
                                if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
                                        di_flags |= XFS_DIFLAG_PROJINHERIT;
                        } else if (S_ISREG(mode)) {
                                if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
                                        di_flags |= XFS_DIFLAG_REALTIME;
                                if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
                                        di_flags |= XFS_DIFLAG_EXTSIZE;
                                        ip->i_d.di_extsize = pip->i_d.di_extsize;
                                }
                        }
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
                            xfs_inherit_noatime)
                                di_flags |= XFS_DIFLAG_NOATIME;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
                            xfs_inherit_nodump)
                                di_flags |= XFS_DIFLAG_NODUMP;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
                            xfs_inherit_sync)
                                di_flags |= XFS_DIFLAG_SYNC;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
                            xfs_inherit_nosymlinks)
                                di_flags |= XFS_DIFLAG_NOSYMLINKS;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
                            xfs_inherit_nodefrag)
                                di_flags |= XFS_DIFLAG_NODEFRAG;
                        if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
                                di_flags |= XFS_DIFLAG_FILESTREAM;
                        if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
                                di_flags2 |= XFS_DIFLAG2_DAX;

                        ip->i_d.di_flags |= di_flags;
                        ip->i_d.di_flags2 |= di_flags2;
                }
                if (pip &&
                    (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
                    pip->i_d.di_version == 3 &&
                    ip->i_d.di_version == 3) {
                        if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
                                ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
                                ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
                        }
                }
                /* FALLTHROUGH */
        case S_IFLNK:
                ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
                ip->i_df.if_flags = XFS_IFEXTENTS;
                ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
                ip->i_df.if_u1.if_extents = NULL;
                break;
        default:
                ASSERT(0);
        }
        /*
         * Attribute fork settings for new inode.
         */
        ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
        ip->i_d.di_anextents = 0;

        /*
         * Log the new values stuffed into the inode.
         */
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, flags);

        /* now that we have an i_mode we can set up the inode structure */
        xfs_setup_inode(ip);

        *ipp = ip;
        return 0;
}

/*
 * Allocates a new inode from disk and returns a pointer to the
 * incore copy.  This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * xfs_create_dir.
 */
int
xfs_dir_ialloc(
        xfs_trans_t     **tpp,          /* input: current transaction;
                                           output: may be a new transaction. */
        xfs_inode_t     *dp,            /* directory within which to allocate
                                           the inode. */
        umode_t         mode,
        xfs_nlink_t     nlink,
        xfs_dev_t       rdev,
        prid_t          prid,           /* project id */
        int             okalloc,        /* ok to allocate new space */
        xfs_inode_t     **ipp,          /* pointer to inode; it will be
                                           locked. */
        int             *committed)
{
        xfs_trans_t     *tp;
        xfs_inode_t     *ip;
        xfs_buf_t       *ialloc_context = NULL;
        int             code;
        void            *dqinfo;
        uint            tflags;

        tp = *tpp;
        ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

        /*
         * xfs_ialloc will return a pointer to an incore inode if
         * the Space Manager has an available inode on the free
         * list.  Otherwise, it will do an allocation and replenish
         * the freelist.  Since we can only do one allocation per
         * transaction without deadlocks, we will need to commit the
         * current transaction and start a new one.  We will then
         * need to call xfs_ialloc again to get the inode.
         *
         * If xfs_ialloc did an allocation to replenish the freelist,
         * it returns the bp containing the head of the freelist as
         * ialloc_context.  We will hold a lock on it across the
         * transaction commit so that no other process can steal
         * the inode(s) that we've just allocated.
         */
        code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
                          &ialloc_context, &ip);

        /*
         * Return an error if we were unable to allocate a new inode.
         * This should only happen if we run out of space on disk or
         * encounter a disk error.
         */
        if (code) {
                *ipp = NULL;
                return code;
        }
        if (!ialloc_context && !ip) {
                *ipp = NULL;
                return -ENOSPC;
        }

        /*
         * If the AGI buffer is non-NULL, then we were unable to get an
         * inode in one operation.  We need to commit the current
         * transaction and call xfs_ialloc() again.  It is guaranteed
         * to succeed the second time.
         */
        if (ialloc_context) {
                /*
                 * Normally, xfs_trans_commit releases all the locks.
                 * We call bhold to hang on to the ialloc_context across
                 * the commit.  Holding this buffer prevents any other
                 * processes from doing any allocations in this
                 * allocation group.
                 */
                xfs_trans_bhold(tp, ialloc_context);

                /*
                 * We want the quota changes to be associated with the next
                 * transaction, NOT this one.  So, detach the dqinfo from this
                 * and attach it to the next transaction.
                 */
                dqinfo = NULL;
                tflags = 0;
                if (tp->t_dqinfo) {
                        dqinfo = (void *)tp->t_dqinfo;
                        tp->t_dqinfo = NULL;
                        tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
                        tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
                }

                code = xfs_trans_roll(&tp, NULL);
                if (committed != NULL)
                        *committed = 1;

                /*
                 * Re-attach the quota info that we detached from the previous
                 * transaction.
                 */
                if (dqinfo) {
                        tp->t_dqinfo = dqinfo;
                        tp->t_flags |= tflags;
                }

                if (code) {
                        xfs_buf_relse(ialloc_context);
                        *tpp = tp;
                        *ipp = NULL;
                        return code;
                }
                xfs_trans_bjoin(tp, ialloc_context);

                /*
                 * Call ialloc again.  Since we've locked out all
                 * other allocations in this allocation group,
                 * this call should always succeed.
                 */
                code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
                                  okalloc, &ialloc_context, &ip);

                /*
                 * If we get an error at this point, return to the caller
                 * so that the current transaction can be aborted.
                 */
                if (code) {
                        *tpp = tp;
                        *ipp = NULL;
                        return code;
                }
                ASSERT(!ialloc_context && ip);

        } else {
                if (committed != NULL)
                        *committed = 0;
        }

        *ipp = ip;
        *tpp = tp;

        return 0;
}

/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
 */
static int                      /* error */
xfs_droplink(
        xfs_trans_t *tp,
        xfs_inode_t *ip)
{
        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

        drop_nlink(VFS_I(ip));
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        if (VFS_I(ip)->i_nlink)
                return 0;

        return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static int
xfs_bumplink(
        xfs_trans_t *tp,
        xfs_inode_t *ip)
{
        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

        ASSERT(ip->i_d.di_version > 1);
        inc_nlink(VFS_I(ip));
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        return 0;
}
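
/*
 * Create a new file or directory named 'name' in directory 'dp': reserve
 * quota and transaction space, allocate the new inode with xfs_dir_ialloc(),
 * add the directory entry, then commit the transaction.
 */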
int
xfs_create(
        xfs_inode_t             *dp,
        struct xfs_name         *name,
        umode_t                 mode,
        xfs_dev_t               rdev,
        xfs_inode_t             **ipp)
{
        int                     is_dir = S_ISDIR(mode);
        struct xfs_mount        *mp = dp->i_mount;
        struct xfs_inode        *ip = NULL;
        struct xfs_trans        *tp = NULL;
        int                     error;
        struct xfs_defer_ops    dfops;
        xfs_fsblock_t           first_block;
        bool                    unlock_dp_on_error = false;
        prid_t                  prid;
        struct xfs_dquot        *udqp = NULL;
        struct xfs_dquot        *gdqp = NULL;
        struct xfs_dquot        *pdqp = NULL;
        struct xfs_trans_res    *tres;
        uint                    resblks;

        trace_xfs_create(dp, name);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        prid = xfs_get_initial_prid(dp);

        /*
         * Make sure that we have allocated dquot(s) on disk.
         */
        error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
                                   xfs_kgid_to_gid(current_fsgid()), prid,
                                   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
                                   &udqp, &gdqp, &pdqp);
        if (error)
                return error;

        if (is_dir) {
                rdev = 0;
                resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
                tres = &M_RES(mp)->tr_mkdir;
        } else {
                resblks = XFS_CREATE_SPACE_RES(mp, name->len);
                tres = &M_RES(mp)->tr_create;
        }

        /*
         * Initially assume that the file does not exist and
         * reserve the resources for that case.  If that is not
         * the case we'll drop the one we have and get a more
         * appropriate transaction later.
         */
        error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
        if (error == -ENOSPC) {
                /* flush outstanding delalloc blocks and retry */
                xfs_flush_inodes(mp);
                error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
        }
        if (error == -ENOSPC) {
                /* No space at all so try a "no-allocation" reservation */
                resblks = 0;
                error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
        }
        if (error)
                goto out_release_inode;

        xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
        unlock_dp_on_error = true;

        xfs_defer_init(&dfops, &first_block);

        /*
         * Reserve disk quota and the inode.
         */
        error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
                                        pdqp, resblks, 1, 0);
        if (error)
                goto out_trans_cancel;

        if (!resblks) {
                error = xfs_dir_canenter(tp, dp, name);
                if (error)
                        goto out_trans_cancel;
        }

        /*
         * A newly created regular or special file just has one directory
         * entry pointing to them, but a directory also has the "." entry
         * pointing to itself.
         */
        error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
                               prid, resblks > 0, &ip, NULL);
        if (error)
                goto out_trans_cancel;

        /*
         * Now we join the directory inode to the transaction.  We do not do it
         * earlier because xfs_dir_ialloc might commit the previous transaction
         * (and release all the locks).  An error from here on will result in
         * the transaction cancel unlocking dp so don't do it explicitly in the
         * error path.
         */
        xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
        unlock_dp_on_error = false;

        error = xfs_dir_createname(tp, dp, name, ip->i_ino,
                                   &first_block, &dfops, resblks ?
                                        resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
        if (error) {
                ASSERT(error != -ENOSPC);
                goto out_trans_cancel;
        }
        xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

        if (is_dir) {
                error = xfs_dir_init(tp, ip, dp);
                if (error)
                        goto out_bmap_cancel;

                error = xfs_bumplink(tp, dp);
                if (error)
                        goto out_bmap_cancel;
        }

        /*
         * If this is a synchronous mount, make sure that the
         * create transaction goes to disk before returning to
         * the user.
         */
        if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
                xfs_trans_set_sync(tp);

        /*
         * Attach the dquot(s) to the inodes and modify them incore.
         * The IDs of the inode couldn't have changed since the new
         * inode has been locked ever since it was created.
         */
        xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

        error = xfs_defer_finish(&tp, &dfops, NULL);
        if (error)
                goto out_bmap_cancel;

        error = xfs_trans_commit(tp);
        if (error)
                goto out_release_inode;

        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);
        xfs_qm_dqrele(pdqp);

        *ipp = ip;
        return 0;

out_bmap_cancel:
        xfs_defer_cancel(&dfops);
out_trans_cancel:
        xfs_trans_cancel(tp);
out_release_inode:
        /*
         * Wait until after the current transaction is aborted to finish the
         * setup of the inode and release the inode.  This prevents recursive
         * transactions and deadlocks from xfs_inactive.
         */
        if (ip) {
                xfs_finish_inode_setup(ip);
                IRELE(ip);
        }

        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);
        xfs_qm_dqrele(pdqp);

        if (unlock_dp_on_error)
                xfs_iunlock(dp, XFS_ILOCK_EXCL);
        return error;
}
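
/*
 * Create a temporary file (for O_TMPFILE): the new inode is allocated and
 * placed on the AGI unlinked list via xfs_iunlink() instead of being linked
 * into a directory, so it is reclaimed automatically once the last reference
 * is dropped.
 */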
  1180. int
  1181. xfs_create_tmpfile(
  1182. struct xfs_inode *dp,
  1183. struct dentry *dentry,
  1184. umode_t mode,
  1185. struct xfs_inode **ipp)
  1186. {
  1187. struct xfs_mount *mp = dp->i_mount;
  1188. struct xfs_inode *ip = NULL;
  1189. struct xfs_trans *tp = NULL;
  1190. int error;
  1191. prid_t prid;
  1192. struct xfs_dquot *udqp = NULL;
  1193. struct xfs_dquot *gdqp = NULL;
  1194. struct xfs_dquot *pdqp = NULL;
  1195. struct xfs_trans_res *tres;
  1196. uint resblks;
  1197. if (XFS_FORCED_SHUTDOWN(mp))
  1198. return -EIO;
  1199. prid = xfs_get_initial_prid(dp);
  1200. /*
  1201. * Make sure that we have allocated dquot(s) on disk.
  1202. */
  1203. error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
  1204. xfs_kgid_to_gid(current_fsgid()), prid,
  1205. XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
  1206. &udqp, &gdqp, &pdqp);
  1207. if (error)
  1208. return error;
  1209. resblks = XFS_IALLOC_SPACE_RES(mp);
  1210. tres = &M_RES(mp)->tr_create_tmpfile;
  1211. error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
  1212. if (error == -ENOSPC) {
  1213. /* No space at all so try a "no-allocation" reservation */
  1214. resblks = 0;
  1215. error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
  1216. }
  1217. if (error)
  1218. goto out_release_inode;
  1219. error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
  1220. pdqp, resblks, 1, 0);
  1221. if (error)
  1222. goto out_trans_cancel;
  1223. error = xfs_dir_ialloc(&tp, dp, mode, 1, 0,
  1224. prid, resblks > 0, &ip, NULL);
  1225. if (error)
  1226. goto out_trans_cancel;
  1227. if (mp->m_flags & XFS_MOUNT_WSYNC)
  1228. xfs_trans_set_sync(tp);
/*
 * Attach the dquot(s) to the inodes and modify them incore.
 * The inode's IDs cannot have changed since the new inode has
 * been locked ever since it was created.
 */
  1234. xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
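/*
 * A tmpfile starts life with a zero link count, so put it straight
 * onto the AGI unlinked list. If we crash before it is linked into
 * the namespace, log recovery will free it for us.
 */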
  1235. error = xfs_iunlink(tp, ip);
  1236. if (error)
  1237. goto out_trans_cancel;
  1238. error = xfs_trans_commit(tp);
  1239. if (error)
  1240. goto out_release_inode;
  1241. xfs_qm_dqrele(udqp);
  1242. xfs_qm_dqrele(gdqp);
  1243. xfs_qm_dqrele(pdqp);
  1244. *ipp = ip;
  1245. return 0;
  1246. out_trans_cancel:
  1247. xfs_trans_cancel(tp);
  1248. out_release_inode:
  1249. /*
  1250. * Wait until after the current transaction is aborted to finish the
  1251. * setup of the inode and release the inode. This prevents recursive
  1252. * transactions and deadlocks from xfs_inactive.
  1253. */
  1254. if (ip) {
  1255. xfs_finish_inode_setup(ip);
  1256. IRELE(ip);
  1257. }
  1258. xfs_qm_dqrele(udqp);
  1259. xfs_qm_dqrele(gdqp);
  1260. xfs_qm_dqrele(pdqp);
  1261. return error;
  1262. }
  1263. int
  1264. xfs_link(
  1265. xfs_inode_t *tdp,
  1266. xfs_inode_t *sip,
  1267. struct xfs_name *target_name)
  1268. {
  1269. xfs_mount_t *mp = tdp->i_mount;
  1270. xfs_trans_t *tp;
  1271. int error;
  1272. struct xfs_defer_ops dfops;
  1273. xfs_fsblock_t first_block;
  1274. int resblks;
  1275. trace_xfs_link(tdp, target_name);
  1276. ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
  1277. if (XFS_FORCED_SHUTDOWN(mp))
  1278. return -EIO;
  1279. error = xfs_qm_dqattach(sip, 0);
  1280. if (error)
  1281. goto std_return;
  1282. error = xfs_qm_dqattach(tdp, 0);
  1283. if (error)
  1284. goto std_return;
  1285. resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
  1286. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
  1287. if (error == -ENOSPC) {
  1288. resblks = 0;
  1289. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
  1290. }
  1291. if (error)
  1292. goto std_return;
  1293. xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
  1294. xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
  1295. xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
  1296. /*
  1297. * If we are using project inheritance, we only allow hard link
  1298. * creation in our tree when the project IDs are the same; else
  1299. * the tree quota mechanism could be circumvented.
  1300. */
  1301. if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
  1302. (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
  1303. error = -EXDEV;
  1304. goto error_return;
  1305. }
  1306. if (!resblks) {
  1307. error = xfs_dir_canenter(tp, tdp, target_name);
  1308. if (error)
  1309. goto error_return;
  1310. }
  1311. xfs_defer_init(&dfops, &first_block);
/*
 * Handle the initial link state of an O_TMPFILE inode: it was
 * placed on the AGI unlinked list at creation, so take it back
 * off before it gains a real directory entry.
 */
  1315. if (VFS_I(sip)->i_nlink == 0) {
  1316. error = xfs_iunlink_remove(tp, sip);
  1317. if (error)
  1318. goto error_return;
  1319. }
  1320. error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
  1321. &first_block, &dfops, resblks);
  1322. if (error)
  1323. goto error_return;
  1324. xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
  1325. xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
  1326. error = xfs_bumplink(tp, sip);
  1327. if (error)
  1328. goto error_return;
  1329. /*
  1330. * If this is a synchronous mount, make sure that the
  1331. * link transaction goes to disk before returning to
  1332. * the user.
  1333. */
  1334. if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
  1335. xfs_trans_set_sync(tp);
  1336. error = xfs_defer_finish(&tp, &dfops, NULL);
  1337. if (error) {
  1338. xfs_defer_cancel(&dfops);
  1339. goto error_return;
  1340. }
  1341. return xfs_trans_commit(tp);
  1342. error_return:
  1343. xfs_trans_cancel(tp);
  1344. std_return:
  1345. return error;
  1346. }
  1347. /*
  1348. * Free up the underlying blocks past new_size. The new size must be smaller
  1349. * than the current size. This routine can be used both for the attribute and
  1350. * data fork, and does not modify the inode size, which is left to the caller.
  1351. *
  1352. * The transaction passed to this routine must have made a permanent log
  1353. * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
  1354. * given transaction and start new ones, so make sure everything involved in
  1355. * the transaction is tidy before calling here. Some transaction will be
  1356. * returned to the caller to be committed. The incoming transaction must
  1357. * already include the inode, and both inode locks must be held exclusively.
  1358. * The inode must also be "held" within the transaction. On return the inode
  1359. * will be "held" within the returned transaction. This routine does NOT
  1360. * require any disk space to be reserved for it within the transaction.
  1361. *
  1362. * If we get an error, we must return with the inode locked and linked into the
  1363. * current transaction. This keeps things simple for the higher level code,
  1364. * because it always knows that the inode is locked and held in the transaction
  1365. * that returns to it whether errors occur or not. We don't mark the inode
  1366. * dirty on error so that transactions can be easily aborted if possible.
  1367. */
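/*
 * A minimal calling sketch (illustrative, mirroring what
 * xfs_inactive_truncate() does below). Note that the transaction
 * handed back in *tpp may have been rolled and must still be
 * committed or cancelled by the caller:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
 *	...
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */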
  1368. int
  1369. xfs_itruncate_extents(
  1370. struct xfs_trans **tpp,
  1371. struct xfs_inode *ip,
  1372. int whichfork,
  1373. xfs_fsize_t new_size)
  1374. {
  1375. struct xfs_mount *mp = ip->i_mount;
  1376. struct xfs_trans *tp = *tpp;
  1377. struct xfs_defer_ops dfops;
  1378. xfs_fsblock_t first_block;
  1379. xfs_fileoff_t first_unmap_block;
  1380. xfs_fileoff_t last_block;
  1381. xfs_filblks_t unmap_len;
  1382. int error = 0;
  1383. int done = 0;
  1384. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  1385. ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
  1386. xfs_isilocked(ip, XFS_IOLOCK_EXCL));
  1387. ASSERT(new_size <= XFS_ISIZE(ip));
  1388. ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
  1389. ASSERT(ip->i_itemp != NULL);
  1390. ASSERT(ip->i_itemp->ili_lock_flags == 0);
  1391. ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
  1392. trace_xfs_itruncate_extents_start(ip, new_size);
  1393. /*
  1394. * Since it is possible for space to become allocated beyond
  1395. * the end of the file (in a crash where the space is allocated
  1396. * but the inode size is not yet updated), simply remove any
  1397. * blocks which show up between the new EOF and the maximum
  1398. * possible file size. If the first block to be removed is
 * beyond the maximum file size (i.e. it is the same as last_block),
  1400. * then there is nothing to do.
  1401. */
  1402. first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
  1403. last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
  1404. if (first_unmap_block == last_block)
  1405. return 0;
  1406. ASSERT(first_unmap_block < last_block);
  1407. unmap_len = last_block - first_unmap_block + 1;
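/*
 * Worked example (illustrative numbers): with 4k filesystem blocks,
 * a new_size of 10000 bytes rounds up to first_unmap_block 3, so
 * blocks 0-2 are kept and everything from block 3 out to the
 * maximum file offset is unmapped below.
 */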
  1408. while (!done) {
  1409. xfs_defer_init(&dfops, &first_block);
  1410. error = xfs_bunmapi(tp, ip,
  1411. first_unmap_block, unmap_len,
  1412. xfs_bmapi_aflag(whichfork),
  1413. XFS_ITRUNC_MAX_EXTENTS,
  1414. &first_block, &dfops,
  1415. &done);
  1416. if (error)
  1417. goto out_bmap_cancel;
  1418. /*
  1419. * Duplicate the transaction that has the permanent
  1420. * reservation and commit the old transaction.
  1421. */
  1422. error = xfs_defer_finish(&tp, &dfops, ip);
  1423. if (error)
  1424. goto out_bmap_cancel;
  1425. error = xfs_trans_roll(&tp, ip);
  1426. if (error)
  1427. goto out;
  1428. }
  1429. /* Remove all pending CoW reservations. */
  1430. error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
  1431. last_block);
  1432. if (error)
  1433. goto out;
  1434. /*
  1435. * Clear the reflink flag if we truncated everything.
  1436. */
  1437. if (ip->i_d.di_nblocks == 0 && xfs_is_reflink_inode(ip)) {
  1438. ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
  1439. xfs_inode_clear_cowblocks_tag(ip);
  1440. }
  1441. /*
  1442. * Always re-log the inode so that our permanent transaction can keep
  1443. * on rolling it forward in the log.
  1444. */
  1445. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  1446. trace_xfs_itruncate_extents_end(ip, new_size);
  1447. out:
  1448. *tpp = tp;
  1449. return error;
  1450. out_bmap_cancel:
  1451. /*
  1452. * If the bunmapi call encounters an error, return to the caller where
  1453. * the transaction can be properly aborted. We just need to make sure
  1454. * we're not holding any resources that we were not when we came in.
  1455. */
  1456. xfs_defer_cancel(&dfops);
  1457. goto out;
  1458. }
  1459. int
  1460. xfs_release(
  1461. xfs_inode_t *ip)
  1462. {
  1463. xfs_mount_t *mp = ip->i_mount;
  1464. int error;
  1465. if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
  1466. return 0;
  1467. /* If this is a read-only mount, don't do this (would generate I/O) */
  1468. if (mp->m_flags & XFS_MOUNT_RDONLY)
  1469. return 0;
  1470. if (!XFS_FORCED_SHUTDOWN(mp)) {
  1471. int truncated;
  1472. /*
  1473. * If we previously truncated this file and removed old data
  1474. * in the process, we want to initiate "early" writeout on
  1475. * the last close. This is an attempt to combat the notorious
  1476. * NULL files problem which is particularly noticeable from a
  1477. * truncate down, buffered (re-)write (delalloc), followed by
  1478. * a crash. What we are effectively doing here is
  1479. * significantly reducing the time window where we'd otherwise
  1480. * be exposed to that problem.
  1481. */
  1482. truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
  1483. if (truncated) {
  1484. xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
  1485. if (ip->i_delayed_blks > 0) {
  1486. error = filemap_flush(VFS_I(ip)->i_mapping);
  1487. if (error)
  1488. return error;
  1489. }
  1490. }
  1491. }
  1492. if (VFS_I(ip)->i_nlink == 0)
  1493. return 0;
  1494. if (xfs_can_free_eofblocks(ip, false)) {
  1495. /*
  1496. * If we can't get the iolock just skip truncating the blocks
  1497. * past EOF because we could deadlock with the mmap_sem
  1498. * otherwise. We'll get another chance to drop them once the
  1499. * last reference to the inode is dropped, so we'll never leak
  1500. * blocks permanently.
  1501. *
 * Further, if the inode is being opened, written and closed
 * frequently and we have delayed allocation blocks outstanding
 * (e.g. streaming writes from the NFS server), truncating the
 * blocks past EOF will cause fragmentation.
  1507. *
  1508. * In this case don't do the truncation, either, but we have to
  1509. * be careful how we detect this case. Blocks beyond EOF show
  1510. * up as i_delayed_blks even when the inode is clean, so we
  1511. * need to truncate them away first before checking for a dirty
  1512. * release. Hence on the first dirty close we will still remove
  1513. * the speculative allocation, but after that we will leave it
  1514. * in place.
  1515. */
  1516. if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
  1517. return 0;
  1518. error = xfs_free_eofblocks(mp, ip, true);
  1519. if (error && error != -EAGAIN)
  1520. return error;
/* delalloc blocks after truncation mean the file really is dirty */
  1522. if (ip->i_delayed_blks)
  1523. xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
  1524. }
  1525. return 0;
  1526. }
  1527. /*
  1528. * xfs_inactive_truncate
  1529. *
  1530. * Called to perform a truncate when an inode becomes unlinked.
  1531. */
  1532. STATIC int
  1533. xfs_inactive_truncate(
  1534. struct xfs_inode *ip)
  1535. {
  1536. struct xfs_mount *mp = ip->i_mount;
  1537. struct xfs_trans *tp;
  1538. int error;
  1539. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
  1540. if (error) {
  1541. ASSERT(XFS_FORCED_SHUTDOWN(mp));
  1542. return error;
  1543. }
  1544. xfs_ilock(ip, XFS_ILOCK_EXCL);
  1545. xfs_trans_ijoin(tp, ip, 0);
  1546. /*
  1547. * Log the inode size first to prevent stale data exposure in the event
  1548. * of a system crash before the truncate completes. See the related
  1549. * comment in xfs_vn_setattr_size() for details.
  1550. */
  1551. ip->i_d.di_size = 0;
  1552. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  1553. error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
  1554. if (error)
  1555. goto error_trans_cancel;
  1556. ASSERT(ip->i_d.di_nextents == 0);
  1557. error = xfs_trans_commit(tp);
  1558. if (error)
  1559. goto error_unlock;
  1560. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1561. return 0;
  1562. error_trans_cancel:
  1563. xfs_trans_cancel(tp);
  1564. error_unlock:
  1565. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1566. return error;
  1567. }
  1568. /*
  1569. * xfs_inactive_ifree()
  1570. *
  1571. * Perform the inode free when an inode is unlinked.
  1572. */
  1573. STATIC int
  1574. xfs_inactive_ifree(
  1575. struct xfs_inode *ip)
  1576. {
  1577. struct xfs_defer_ops dfops;
  1578. xfs_fsblock_t first_block;
  1579. struct xfs_mount *mp = ip->i_mount;
  1580. struct xfs_trans *tp;
  1581. int error;
  1582. /*
  1583. * The ifree transaction might need to allocate blocks for record
  1584. * insertion to the finobt. We don't want to fail here at ENOSPC, so
  1585. * allow ifree to dip into the reserved block pool if necessary.
  1586. *
  1587. * Freeing large sets of inodes generally means freeing inode chunks,
  1588. * directory and file data blocks, so this should be relatively safe.
  1589. * Only under severe circumstances should it be possible to free enough
  1590. * inodes to exhaust the reserve block pool via finobt expansion while
  1591. * at the same time not creating free space in the filesystem.
  1592. *
  1593. * Send a warning if the reservation does happen to fail, as the inode
  1594. * now remains allocated and sits on the unlinked list until the fs is
  1595. * repaired.
  1596. */
  1597. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
  1598. XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
  1599. if (error) {
  1600. if (error == -ENOSPC) {
  1601. xfs_warn_ratelimited(mp,
  1602. "Failed to remove inode(s) from unlinked list. "
  1603. "Please free space, unmount and run xfs_repair.");
  1604. } else {
  1605. ASSERT(XFS_FORCED_SHUTDOWN(mp));
  1606. }
  1607. return error;
  1608. }
  1609. xfs_ilock(ip, XFS_ILOCK_EXCL);
  1610. xfs_trans_ijoin(tp, ip, 0);
  1611. xfs_defer_init(&dfops, &first_block);
  1612. error = xfs_ifree(tp, ip, &dfops);
  1613. if (error) {
/*
 * If we fail to free the inode, shut down. The cancel
 * might do that for us, but we need to make sure.
 * Otherwise the inode might be lost for a long time or
 * forever.
 */
  1619. if (!XFS_FORCED_SHUTDOWN(mp)) {
  1620. xfs_notice(mp, "%s: xfs_ifree returned error %d",
  1621. __func__, error);
  1622. xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
  1623. }
  1624. xfs_trans_cancel(tp);
  1625. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1626. return error;
  1627. }
  1628. /*
  1629. * Credit the quota account(s). The inode is gone.
  1630. */
  1631. xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
  1632. /*
  1633. * Just ignore errors at this point. There is nothing we can do except
  1634. * to try to keep going. Make sure it's not a silent error.
  1635. */
  1636. error = xfs_defer_finish(&tp, &dfops, NULL);
  1637. if (error) {
  1638. xfs_notice(mp, "%s: xfs_defer_finish returned error %d",
  1639. __func__, error);
  1640. xfs_defer_cancel(&dfops);
  1641. }
  1642. error = xfs_trans_commit(tp);
  1643. if (error)
  1644. xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
  1645. __func__, error);
  1646. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1647. return 0;
  1648. }
/*
 * xfs_inactive
 *
 * This is called when the reference count for the vnode goes to
 * zero. If the file has been unlinked, then it must now be
 * truncated. Also, we clear all of the read-ahead state kept for
 * the inode here since the file is now closed.
 */
  1657. void
  1658. xfs_inactive(
  1659. xfs_inode_t *ip)
  1660. {
  1661. struct xfs_mount *mp;
  1662. int error;
  1663. int truncate = 0;
  1664. /*
  1665. * If the inode is already free, then there can be nothing
  1666. * to clean up here.
  1667. */
  1668. if (VFS_I(ip)->i_mode == 0) {
  1669. ASSERT(ip->i_df.if_real_bytes == 0);
  1670. ASSERT(ip->i_df.if_broot_bytes == 0);
  1671. return;
  1672. }
  1673. mp = ip->i_mount;
  1674. ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
  1675. /* If this is a read-only mount, don't do this (would generate I/O) */
  1676. if (mp->m_flags & XFS_MOUNT_RDONLY)
  1677. return;
  1678. if (VFS_I(ip)->i_nlink != 0) {
  1679. /*
  1680. * force is true because we are evicting an inode from the
  1681. * cache. Post-eof blocks must be freed, lest we end up with
  1682. * broken free space accounting.
  1683. */
  1684. if (xfs_can_free_eofblocks(ip, true))
  1685. xfs_free_eofblocks(mp, ip, false);
  1686. return;
  1687. }
  1688. if (S_ISREG(VFS_I(ip)->i_mode) &&
  1689. (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
  1690. ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
  1691. truncate = 1;
  1692. error = xfs_qm_dqattach(ip, 0);
  1693. if (error)
  1694. return;
  1695. if (S_ISLNK(VFS_I(ip)->i_mode))
  1696. error = xfs_inactive_symlink(ip);
  1697. else if (truncate)
  1698. error = xfs_inactive_truncate(ip);
  1699. if (error)
  1700. return;
/*
 * If there are attributes associated with the file then blow them away
 * now. The code calls a routine that recursively deconstructs the
 * attribute fork. It also blows away the in-core attribute fork.
 */
  1706. if (XFS_IFORK_Q(ip)) {
  1707. error = xfs_attr_inactive(ip);
  1708. if (error)
  1709. return;
  1710. }
  1711. ASSERT(!ip->i_afp);
  1712. ASSERT(ip->i_d.di_anextents == 0);
  1713. ASSERT(ip->i_d.di_forkoff == 0);
  1714. /*
  1715. * Free the inode.
  1716. */
  1717. error = xfs_inactive_ifree(ip);
  1718. if (error)
  1719. return;
  1720. /*
  1721. * Release the dquots held by inode, if any.
  1722. */
  1723. xfs_qm_dqdetach(ip);
  1724. }
/*
 * This is called when the inode's link count goes to 0 or we are creating a
 * tmpfile via O_TMPFILE. In the case of a tmpfile, the link count is dropped
 * to zero by the VFS after we've created the file successfully, so we have
 * to add it to the unlinked list while the link count is still non-zero.
 *
 * We place the on-disk inode on a list in the AGI. It will be pulled from this
 * list when the inode is freed.
 */
  1735. STATIC int
  1736. xfs_iunlink(
  1737. struct xfs_trans *tp,
  1738. struct xfs_inode *ip)
  1739. {
  1740. xfs_mount_t *mp = tp->t_mountp;
  1741. xfs_agi_t *agi;
  1742. xfs_dinode_t *dip;
  1743. xfs_buf_t *agibp;
  1744. xfs_buf_t *ibp;
  1745. xfs_agino_t agino;
  1746. short bucket_index;
  1747. int offset;
  1748. int error;
  1749. ASSERT(VFS_I(ip)->i_mode != 0);
  1750. /*
  1751. * Get the agi buffer first. It ensures lock ordering
  1752. * on the list.
  1753. */
  1754. error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
  1755. if (error)
  1756. return error;
  1757. agi = XFS_BUF_TO_AGI(agibp);
  1758. /*
  1759. * Get the index into the agi hash table for the
  1760. * list this inode will go on.
  1761. */
  1762. agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
  1763. ASSERT(agino != 0);
  1764. bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
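/*
 * XFS_AGI_UNLINKED_BUCKETS is 64, so e.g. agino 200 hashes to
 * bucket 200 % 64 = 8.
 */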
  1765. ASSERT(agi->agi_unlinked[bucket_index]);
  1766. ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
  1767. if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
  1768. /*
  1769. * There is already another inode in the bucket we need
  1770. * to add ourselves to. Add us at the front of the list.
  1771. * Here we put the head pointer into our next pointer,
  1772. * and then we fall through to point the head at us.
  1773. */
  1774. error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
  1775. 0, 0);
  1776. if (error)
  1777. return error;
  1778. ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
  1779. dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
  1780. offset = ip->i_imap.im_boffset +
  1781. offsetof(xfs_dinode_t, di_next_unlinked);
  1782. /* need to recalc the inode CRC if appropriate */
  1783. xfs_dinode_calc_crc(mp, dip);
  1784. xfs_trans_inode_buf(tp, ibp);
  1785. xfs_trans_log_buf(tp, ibp, offset,
  1786. (offset + sizeof(xfs_agino_t) - 1));
  1787. xfs_inobp_check(mp, ibp);
  1788. }
  1789. /*
  1790. * Point the bucket head pointer at the inode being inserted.
  1791. */
  1792. ASSERT(agino != 0);
  1793. agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
  1794. offset = offsetof(xfs_agi_t, agi_unlinked) +
  1795. (sizeof(xfs_agino_t) * bucket_index);
  1796. xfs_trans_log_buf(tp, agibp, offset,
  1797. (offset + sizeof(xfs_agino_t) - 1));
  1798. return 0;
  1799. }
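/*
 * On disk each bucket is a singly linked list chained through
 * di_next_unlinked, e.g.:
 *
 *	agi_unlinked[bucket] -> ino C -> ino B -> ino A -> NULLAGINO
 *
 * Inodes are pushed onto the front of the list, so removal (below)
 * may have to walk the chain to find the predecessor.
 */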
  1800. /*
  1801. * Pull the on-disk inode from the AGI unlinked list.
  1802. */
  1803. STATIC int
  1804. xfs_iunlink_remove(
  1805. xfs_trans_t *tp,
  1806. xfs_inode_t *ip)
  1807. {
  1808. xfs_ino_t next_ino;
  1809. xfs_mount_t *mp;
  1810. xfs_agi_t *agi;
  1811. xfs_dinode_t *dip;
  1812. xfs_buf_t *agibp;
  1813. xfs_buf_t *ibp;
  1814. xfs_agnumber_t agno;
  1815. xfs_agino_t agino;
  1816. xfs_agino_t next_agino;
  1817. xfs_buf_t *last_ibp;
  1818. xfs_dinode_t *last_dip = NULL;
  1819. short bucket_index;
  1820. int offset, last_offset = 0;
  1821. int error;
  1822. mp = tp->t_mountp;
  1823. agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
  1824. /*
  1825. * Get the agi buffer first. It ensures lock ordering
  1826. * on the list.
  1827. */
  1828. error = xfs_read_agi(mp, tp, agno, &agibp);
  1829. if (error)
  1830. return error;
  1831. agi = XFS_BUF_TO_AGI(agibp);
/*
 * Get the index into the agi hash table for the
 * list this inode is on.
 */
  1836. agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
  1837. ASSERT(agino != 0);
  1838. bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
  1839. ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
  1840. ASSERT(agi->agi_unlinked[bucket_index]);
  1841. if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
  1842. /*
  1843. * We're at the head of the list. Get the inode's on-disk
  1844. * buffer to see if there is anyone after us on the list.
  1845. * Only modify our next pointer if it is not already NULLAGINO.
  1846. * This saves us the overhead of dealing with the buffer when
  1847. * there is no need to change it.
  1848. */
  1849. error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
  1850. 0, 0);
  1851. if (error) {
  1852. xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
  1853. __func__, error);
  1854. return error;
  1855. }
  1856. next_agino = be32_to_cpu(dip->di_next_unlinked);
  1857. ASSERT(next_agino != 0);
  1858. if (next_agino != NULLAGINO) {
  1859. dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
  1860. offset = ip->i_imap.im_boffset +
  1861. offsetof(xfs_dinode_t, di_next_unlinked);
  1862. /* need to recalc the inode CRC if appropriate */
  1863. xfs_dinode_calc_crc(mp, dip);
  1864. xfs_trans_inode_buf(tp, ibp);
  1865. xfs_trans_log_buf(tp, ibp, offset,
  1866. (offset + sizeof(xfs_agino_t) - 1));
  1867. xfs_inobp_check(mp, ibp);
  1868. } else {
  1869. xfs_trans_brelse(tp, ibp);
  1870. }
  1871. /*
  1872. * Point the bucket head pointer at the next inode.
  1873. */
  1874. ASSERT(next_agino != 0);
  1875. ASSERT(next_agino != agino);
  1876. agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
  1877. offset = offsetof(xfs_agi_t, agi_unlinked) +
  1878. (sizeof(xfs_agino_t) * bucket_index);
  1879. xfs_trans_log_buf(tp, agibp, offset,
  1880. (offset + sizeof(xfs_agino_t) - 1));
  1881. } else {
  1882. /*
  1883. * We need to search the list for the inode being freed.
  1884. */
  1885. next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
  1886. last_ibp = NULL;
  1887. while (next_agino != agino) {
  1888. struct xfs_imap imap;
  1889. if (last_ibp)
  1890. xfs_trans_brelse(tp, last_ibp);
  1891. imap.im_blkno = 0;
  1892. next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
  1893. error = xfs_imap(mp, tp, next_ino, &imap, 0);
  1894. if (error) {
  1895. xfs_warn(mp,
  1896. "%s: xfs_imap returned error %d.",
  1897. __func__, error);
  1898. return error;
  1899. }
  1900. error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
  1901. &last_ibp, 0, 0);
  1902. if (error) {
  1903. xfs_warn(mp,
  1904. "%s: xfs_imap_to_bp returned error %d.",
  1905. __func__, error);
  1906. return error;
  1907. }
  1908. last_offset = imap.im_boffset;
  1909. next_agino = be32_to_cpu(last_dip->di_next_unlinked);
  1910. ASSERT(next_agino != NULLAGINO);
  1911. ASSERT(next_agino != 0);
  1912. }
  1913. /*
  1914. * Now last_ibp points to the buffer previous to us on the
  1915. * unlinked list. Pull us from the list.
  1916. */
  1917. error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
  1918. 0, 0);
  1919. if (error) {
  1920. xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
  1921. __func__, error);
  1922. return error;
  1923. }
  1924. next_agino = be32_to_cpu(dip->di_next_unlinked);
  1925. ASSERT(next_agino != 0);
  1926. ASSERT(next_agino != agino);
  1927. if (next_agino != NULLAGINO) {
  1928. dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
  1929. offset = ip->i_imap.im_boffset +
  1930. offsetof(xfs_dinode_t, di_next_unlinked);
  1931. /* need to recalc the inode CRC if appropriate */
  1932. xfs_dinode_calc_crc(mp, dip);
  1933. xfs_trans_inode_buf(tp, ibp);
  1934. xfs_trans_log_buf(tp, ibp, offset,
  1935. (offset + sizeof(xfs_agino_t) - 1));
  1936. xfs_inobp_check(mp, ibp);
  1937. } else {
  1938. xfs_trans_brelse(tp, ibp);
  1939. }
  1940. /*
  1941. * Point the previous inode on the list to the next inode.
  1942. */
  1943. last_dip->di_next_unlinked = cpu_to_be32(next_agino);
  1944. ASSERT(next_agino != 0);
  1945. offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
  1946. /* need to recalc the inode CRC if appropriate */
  1947. xfs_dinode_calc_crc(mp, last_dip);
  1948. xfs_trans_inode_buf(tp, last_ibp);
  1949. xfs_trans_log_buf(tp, last_ibp, offset,
  1950. (offset + sizeof(xfs_agino_t) - 1));
  1951. xfs_inobp_check(mp, last_ibp);
  1952. }
  1953. return 0;
  1954. }
  1955. /*
  1956. * A big issue when freeing the inode cluster is that we _cannot_ skip any
  1957. * inodes that are in memory - they all must be marked stale and attached to
  1958. * the cluster buffer.
  1959. */
  1960. STATIC int
  1961. xfs_ifree_cluster(
  1962. xfs_inode_t *free_ip,
  1963. xfs_trans_t *tp,
  1964. struct xfs_icluster *xic)
  1965. {
  1966. xfs_mount_t *mp = free_ip->i_mount;
  1967. int blks_per_cluster;
  1968. int inodes_per_cluster;
  1969. int nbufs;
  1970. int i, j;
  1971. int ioffset;
  1972. xfs_daddr_t blkno;
  1973. xfs_buf_t *bp;
  1974. xfs_inode_t *ip;
  1975. xfs_inode_log_item_t *iip;
  1976. xfs_log_item_t *lip;
  1977. struct xfs_perag *pag;
  1978. xfs_ino_t inum;
  1979. inum = xic->first_ino;
  1980. pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
  1981. blks_per_cluster = xfs_icluster_size_fsb(mp);
  1982. inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
  1983. nbufs = mp->m_ialloc_blks / blks_per_cluster;
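/*
 * Worked example (illustrative geometry): with 4k blocks and
 * 512 byte inodes there are 8 inodes per block (sb_inopblog == 3);
 * an 8k inode cluster is then 2 blocks holding 16 inodes, and a
 * 64-inode chunk (m_ialloc_blks == 8) is freed as 4 cluster buffers.
 */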
  1984. for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
  1985. /*
  1986. * The allocation bitmap tells us which inodes of the chunk were
  1987. * physically allocated. Skip the cluster if an inode falls into
  1988. * a sparse region.
  1989. */
  1990. ioffset = inum - xic->first_ino;
  1991. if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
  1992. ASSERT(do_mod(ioffset, inodes_per_cluster) == 0);
  1993. continue;
  1994. }
  1995. blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
  1996. XFS_INO_TO_AGBNO(mp, inum));
  1997. /*
  1998. * We obtain and lock the backing buffer first in the process
  1999. * here, as we have to ensure that any dirty inode that we
  2000. * can't get the flush lock on is attached to the buffer.
  2001. * If we scan the in-memory inodes first, then buffer IO can
  2002. * complete before we get a lock on it, and hence we may fail
  2003. * to mark all the active inodes on the buffer stale.
  2004. */
  2005. bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
  2006. mp->m_bsize * blks_per_cluster,
  2007. XBF_UNMAPPED);
  2008. if (!bp)
  2009. return -ENOMEM;
/*
 * This buffer may not have been correctly initialised as we
 * didn't read it from disk. That's not important because we are
 * only using it to mark the buffer as stale in the log, and to
 * attach stale cached inodes to it. That means it will never be
 * dispatched for IO. If it is, we want to know about it, and we
 * want it to fail. We can achieve this by adding a write
 * verifier to the buffer.
 */
  2019. bp->b_ops = &xfs_inode_buf_ops;
  2020. /*
  2021. * Walk the inodes already attached to the buffer and mark them
  2022. * stale. These will all have the flush locks held, so an
  2023. * in-memory inode walk can't lock them. By marking them all
  2024. * stale first, we will not attempt to lock them in the loop
  2025. * below as the XFS_ISTALE flag will be set.
  2026. */
  2027. lip = bp->b_fspriv;
  2028. while (lip) {
  2029. if (lip->li_type == XFS_LI_INODE) {
  2030. iip = (xfs_inode_log_item_t *)lip;
  2031. ASSERT(iip->ili_logged == 1);
  2032. lip->li_cb = xfs_istale_done;
  2033. xfs_trans_ail_copy_lsn(mp->m_ail,
  2034. &iip->ili_flush_lsn,
  2035. &iip->ili_item.li_lsn);
  2036. xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
  2037. }
  2038. lip = lip->li_bio_list;
  2039. }
  2040. /*
  2041. * For each inode in memory attempt to add it to the inode
  2042. * buffer and set it up for being staled on buffer IO
  2043. * completion. This is safe as we've locked out tail pushing
  2044. * and flushing by locking the buffer.
  2045. *
  2046. * We have already marked every inode that was part of a
  2047. * transaction stale above, which means there is no point in
  2048. * even trying to lock them.
  2049. */
  2050. for (i = 0; i < inodes_per_cluster; i++) {
  2051. retry:
  2052. rcu_read_lock();
  2053. ip = radix_tree_lookup(&pag->pag_ici_root,
  2054. XFS_INO_TO_AGINO(mp, (inum + i)));
  2055. /* Inode not in memory, nothing to do */
  2056. if (!ip) {
  2057. rcu_read_unlock();
  2058. continue;
  2059. }
  2060. /*
  2061. * because this is an RCU protected lookup, we could
  2062. * find a recently freed or even reallocated inode
  2063. * during the lookup. We need to check under the
  2064. * i_flags_lock for a valid inode here. Skip it if it
  2065. * is not valid, the wrong inode or stale.
  2066. */
  2067. spin_lock(&ip->i_flags_lock);
  2068. if (ip->i_ino != inum + i ||
  2069. __xfs_iflags_test(ip, XFS_ISTALE)) {
  2070. spin_unlock(&ip->i_flags_lock);
  2071. rcu_read_unlock();
  2072. continue;
  2073. }
  2074. spin_unlock(&ip->i_flags_lock);
  2075. /*
  2076. * Don't try to lock/unlock the current inode, but we
  2077. * _cannot_ skip the other inodes that we did not find
  2078. * in the list attached to the buffer and are not
  2079. * already marked stale. If we can't lock it, back off
  2080. * and retry.
  2081. */
  2082. if (ip != free_ip &&
  2083. !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
  2084. rcu_read_unlock();
  2085. delay(1);
  2086. goto retry;
  2087. }
  2088. rcu_read_unlock();
  2089. xfs_iflock(ip);
  2090. xfs_iflags_set(ip, XFS_ISTALE);
  2091. /*
  2092. * we don't need to attach clean inodes or those only
  2093. * with unlogged changes (which we throw away, anyway).
  2094. */
  2095. iip = ip->i_itemp;
  2096. if (!iip || xfs_inode_clean(ip)) {
  2097. ASSERT(ip != free_ip);
  2098. xfs_ifunlock(ip);
  2099. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  2100. continue;
  2101. }
  2102. iip->ili_last_fields = iip->ili_fields;
  2103. iip->ili_fields = 0;
  2104. iip->ili_fsync_fields = 0;
  2105. iip->ili_logged = 1;
  2106. xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
  2107. &iip->ili_item.li_lsn);
  2108. xfs_buf_attach_iodone(bp, xfs_istale_done,
  2109. &iip->ili_item);
  2110. if (ip != free_ip)
  2111. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  2112. }
  2113. xfs_trans_stale_inode_buf(tp, bp);
  2114. xfs_trans_binval(tp, bp);
  2115. }
  2116. xfs_perag_put(pag);
  2117. return 0;
  2118. }
  2119. /*
  2120. * This is called to return an inode to the inode free list.
  2121. * The inode should already be truncated to 0 length and have
  2122. * no pages associated with it. This routine also assumes that
  2123. * the inode is already a part of the transaction.
  2124. *
  2125. * The on-disk copy of the inode will have been added to the list
  2126. * of unlinked inodes in the AGI. We need to remove the inode from
  2127. * that list atomically with respect to freeing it here.
  2128. */
  2129. int
  2130. xfs_ifree(
  2131. xfs_trans_t *tp,
  2132. xfs_inode_t *ip,
  2133. struct xfs_defer_ops *dfops)
  2134. {
  2135. int error;
  2136. struct xfs_icluster xic = { 0 };
  2137. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  2138. ASSERT(VFS_I(ip)->i_nlink == 0);
  2139. ASSERT(ip->i_d.di_nextents == 0);
  2140. ASSERT(ip->i_d.di_anextents == 0);
  2141. ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
  2142. ASSERT(ip->i_d.di_nblocks == 0);
  2143. /*
  2144. * Pull the on-disk inode from the AGI unlinked list.
  2145. */
  2146. error = xfs_iunlink_remove(tp, ip);
  2147. if (error)
  2148. return error;
  2149. error = xfs_difree(tp, ip->i_ino, dfops, &xic);
  2150. if (error)
  2151. return error;
  2152. VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
  2153. ip->i_d.di_flags = 0;
  2154. ip->i_d.di_dmevmask = 0;
  2155. ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
  2156. ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
  2157. ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
  2158. /*
  2159. * Bump the generation count so no one will be confused
  2160. * by reincarnations of this inode.
  2161. */
  2162. VFS_I(ip)->i_generation++;
  2163. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  2164. if (xic.deleted)
  2165. error = xfs_ifree_cluster(ip, tp, &xic);
  2166. return error;
  2167. }
  2168. /*
  2169. * This is called to unpin an inode. The caller must have the inode locked
  2170. * in at least shared mode so that the buffer cannot be subsequently pinned
  2171. * once someone is waiting for it to be unpinned.
  2172. */
  2173. static void
  2174. xfs_iunpin(
  2175. struct xfs_inode *ip)
  2176. {
  2177. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
  2178. trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
  2179. /* Give the log a push to start the unpinning I/O */
  2180. xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
  2181. }
  2182. static void
  2183. __xfs_iunpin_wait(
  2184. struct xfs_inode *ip)
  2185. {
  2186. wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
  2187. DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
  2188. xfs_iunpin(ip);
  2189. do {
  2190. prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
  2191. if (xfs_ipincount(ip))
  2192. io_schedule();
  2193. } while (xfs_ipincount(ip));
  2194. finish_wait(wq, &wait.wait);
  2195. }
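/*
 * The matching wake_up_bit() on __XFS_IPINNED_BIT is issued when the
 * inode log item is unpinned after the log I/O completes; that code
 * lives in the inode log item implementation, not in this file.
 */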
  2196. void
  2197. xfs_iunpin_wait(
  2198. struct xfs_inode *ip)
  2199. {
  2200. if (xfs_ipincount(ip))
  2201. __xfs_iunpin_wait(ip);
  2202. }
  2203. /*
  2204. * Removing an inode from the namespace involves removing the directory entry
  2205. * and dropping the link count on the inode. Removing the directory entry can
  2206. * result in locking an AGF (directory blocks were freed) and removing a link
  2207. * count can result in placing the inode on an unlinked list which results in
  2208. * locking an AGI.
  2209. *
  2210. * The big problem here is that we have an ordering constraint on AGF and AGI
  2211. * locking - inode allocation locks the AGI, then can allocate a new extent for
  2212. * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
  2213. * removes the inode from the unlinked list, requiring that we lock the AGI
  2214. * first, and then freeing the inode can result in an inode chunk being freed
  2215. * and hence freeing disk space requiring that we lock an AGF.
  2216. *
  2217. * Hence the ordering that is imposed by other parts of the code is AGI before
  2218. * AGF. This means we cannot remove the directory entry before we drop the inode
  2219. * reference count and put it on the unlinked list as this results in a lock
  2220. * order of AGF then AGI, and this can deadlock against inode allocation and
  2221. * freeing. Therefore we must drop the link counts before we remove the
  2222. * directory entry.
  2223. *
  2224. * This is still safe from a transactional point of view - it is not until we
  2225. * get to xfs_defer_finish() that we have the possibility of multiple
  2226. * transactions in this operation. Hence as long as we remove the directory
  2227. * entry and drop the link count in the first transaction of the remove
  2228. * operation, there are no transactional constraints on the ordering here.
  2229. */
  2230. int
  2231. xfs_remove(
  2232. xfs_inode_t *dp,
  2233. struct xfs_name *name,
  2234. xfs_inode_t *ip)
  2235. {
  2236. xfs_mount_t *mp = dp->i_mount;
  2237. xfs_trans_t *tp = NULL;
  2238. int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
  2239. int error = 0;
  2240. struct xfs_defer_ops dfops;
  2241. xfs_fsblock_t first_block;
  2242. uint resblks;
  2243. trace_xfs_remove(dp, name);
  2244. if (XFS_FORCED_SHUTDOWN(mp))
  2245. return -EIO;
  2246. error = xfs_qm_dqattach(dp, 0);
  2247. if (error)
  2248. goto std_return;
  2249. error = xfs_qm_dqattach(ip, 0);
  2250. if (error)
  2251. goto std_return;
  2252. /*
  2253. * We try to get the real space reservation first,
  2254. * allowing for directory btree deletion(s) implying
  2255. * possible bmap insert(s). If we can't get the space
  2256. * reservation then we use 0 instead, and avoid the bmap
  2257. * btree insert(s) in the directory code by, if the bmap
  2258. * insert tries to happen, instead trimming the LAST
  2259. * block from the directory.
  2260. */
  2261. resblks = XFS_REMOVE_SPACE_RES(mp);
  2262. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
  2263. if (error == -ENOSPC) {
  2264. resblks = 0;
  2265. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
  2266. &tp);
  2267. }
  2268. if (error) {
  2269. ASSERT(error != -ENOSPC);
  2270. goto std_return;
  2271. }
  2272. xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
  2273. xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
  2274. xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
  2275. /*
  2276. * If we're removing a directory perform some additional validation.
  2277. */
  2278. if (is_dir) {
  2279. ASSERT(VFS_I(ip)->i_nlink >= 2);
  2280. if (VFS_I(ip)->i_nlink != 2) {
  2281. error = -ENOTEMPTY;
  2282. goto out_trans_cancel;
  2283. }
  2284. if (!xfs_dir_isempty(ip)) {
  2285. error = -ENOTEMPTY;
  2286. goto out_trans_cancel;
  2287. }
  2288. /* Drop the link from ip's "..". */
  2289. error = xfs_droplink(tp, dp);
  2290. if (error)
  2291. goto out_trans_cancel;
  2292. /* Drop the "." link from ip to self. */
  2293. error = xfs_droplink(tp, ip);
  2294. if (error)
  2295. goto out_trans_cancel;
  2296. } else {
  2297. /*
  2298. * When removing a non-directory we need to log the parent
  2299. * inode here. For a directory this is done implicitly
  2300. * by the xfs_droplink call for the ".." entry.
  2301. */
  2302. xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
  2303. }
  2304. xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
  2305. /* Drop the link from dp to ip. */
  2306. error = xfs_droplink(tp, ip);
  2307. if (error)
  2308. goto out_trans_cancel;
  2309. xfs_defer_init(&dfops, &first_block);
  2310. error = xfs_dir_removename(tp, dp, name, ip->i_ino,
  2311. &first_block, &dfops, resblks);
  2312. if (error) {
  2313. ASSERT(error != -ENOENT);
  2314. goto out_bmap_cancel;
  2315. }
  2316. /*
  2317. * If this is a synchronous mount, make sure that the
  2318. * remove transaction goes to disk before returning to
  2319. * the user.
  2320. */
  2321. if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
  2322. xfs_trans_set_sync(tp);
  2323. error = xfs_defer_finish(&tp, &dfops, NULL);
  2324. if (error)
  2325. goto out_bmap_cancel;
  2326. error = xfs_trans_commit(tp);
  2327. if (error)
  2328. goto std_return;
  2329. if (is_dir && xfs_inode_is_filestream(ip))
  2330. xfs_filestream_deassociate(ip);
  2331. return 0;
  2332. out_bmap_cancel:
  2333. xfs_defer_cancel(&dfops);
  2334. out_trans_cancel:
  2335. xfs_trans_cancel(tp);
  2336. std_return:
  2337. return error;
  2338. }
  2339. /*
  2340. * Enter all inodes for a rename transaction into a sorted array.
  2341. */
  2342. #define __XFS_SORT_INODES 5
  2343. STATIC void
  2344. xfs_sort_for_rename(
  2345. struct xfs_inode *dp1, /* in: old (source) directory inode */
  2346. struct xfs_inode *dp2, /* in: new (target) directory inode */
  2347. struct xfs_inode *ip1, /* in: inode of old entry */
  2348. struct xfs_inode *ip2, /* in: inode of new entry */
  2349. struct xfs_inode *wip, /* in: whiteout inode */
  2350. struct xfs_inode **i_tab,/* out: sorted array of inodes */
  2351. int *num_inodes) /* in/out: inodes in array */
  2352. {
  2353. int i, j;
  2354. ASSERT(*num_inodes == __XFS_SORT_INODES);
  2355. memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
  2356. /*
  2357. * i_tab contains a list of pointers to inodes. We initialize
  2358. * the table here & we'll sort it. We will then use it to
  2359. * order the acquisition of the inode locks.
  2360. *
  2361. * Note that the table may contain duplicates. e.g., dp1 == dp2.
  2362. */
  2363. i = 0;
  2364. i_tab[i++] = dp1;
  2365. i_tab[i++] = dp2;
  2366. i_tab[i++] = ip1;
  2367. if (ip2)
  2368. i_tab[i++] = ip2;
  2369. if (wip)
  2370. i_tab[i++] = wip;
  2371. *num_inodes = i;
  2372. /*
  2373. * Sort the elements via bubble sort. (Remember, there are at
  2374. * most 5 elements to sort, so this is adequate.)
  2375. */
  2376. for (i = 0; i < *num_inodes; i++) {
  2377. for (j = 1; j < *num_inodes; j++) {
  2378. if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
  2379. struct xfs_inode *temp = i_tab[j];
  2380. i_tab[j] = i_tab[j-1];
  2381. i_tab[j-1] = temp;
  2382. }
  2383. }
  2384. }
  2385. }
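/*
 * Example: inode numbers { 42, 7, 7, 19 } sort to { 7, 7, 19, 42 }.
 * Duplicate entries (e.g. dp1 == dp2) are harmless because
 * xfs_lock_inodes() skips repeated pointers when locking.
 */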
  2386. static int
  2387. xfs_finish_rename(
  2388. struct xfs_trans *tp,
  2389. struct xfs_defer_ops *dfops)
  2390. {
  2391. int error;
  2392. /*
  2393. * If this is a synchronous mount, make sure that the rename transaction
  2394. * goes to disk before returning to the user.
  2395. */
  2396. if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
  2397. xfs_trans_set_sync(tp);
  2398. error = xfs_defer_finish(&tp, dfops, NULL);
  2399. if (error) {
  2400. xfs_defer_cancel(dfops);
  2401. xfs_trans_cancel(tp);
  2402. return error;
  2403. }
  2404. return xfs_trans_commit(tp);
  2405. }
/*
 * xfs_cross_rename()
 *
 * Responsible for handling the RENAME_EXCHANGE flag of the renameat2()
 * system call.
 */
  2411. STATIC int
  2412. xfs_cross_rename(
  2413. struct xfs_trans *tp,
  2414. struct xfs_inode *dp1,
  2415. struct xfs_name *name1,
  2416. struct xfs_inode *ip1,
  2417. struct xfs_inode *dp2,
  2418. struct xfs_name *name2,
  2419. struct xfs_inode *ip2,
  2420. struct xfs_defer_ops *dfops,
  2421. xfs_fsblock_t *first_block,
  2422. int spaceres)
  2423. {
  2424. int error = 0;
  2425. int ip1_flags = 0;
  2426. int ip2_flags = 0;
  2427. int dp2_flags = 0;
  2428. /* Swap inode number for dirent in first parent */
  2429. error = xfs_dir_replace(tp, dp1, name1,
  2430. ip2->i_ino,
  2431. first_block, dfops, spaceres);
  2432. if (error)
  2433. goto out_trans_abort;
  2434. /* Swap inode number for dirent in second parent */
  2435. error = xfs_dir_replace(tp, dp2, name2,
  2436. ip1->i_ino,
  2437. first_block, dfops, spaceres);
  2438. if (error)
  2439. goto out_trans_abort;
  2440. /*
  2441. * If we're renaming one or more directories across different parents,
  2442. * update the respective ".." entries (and link counts) to match the new
  2443. * parents.
  2444. */
  2445. if (dp1 != dp2) {
  2446. dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
  2447. if (S_ISDIR(VFS_I(ip2)->i_mode)) {
  2448. error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
  2449. dp1->i_ino, first_block,
  2450. dfops, spaceres);
  2451. if (error)
  2452. goto out_trans_abort;
  2453. /* transfer ip2 ".." reference to dp1 */
  2454. if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
  2455. error = xfs_droplink(tp, dp2);
  2456. if (error)
  2457. goto out_trans_abort;
  2458. error = xfs_bumplink(tp, dp1);
  2459. if (error)
  2460. goto out_trans_abort;
  2461. }
/*
 * Although ip1 isn't changed here, userspace needs
 * to be notified about the change, so that applications
 * relying on it (like backup ones) can properly detect
 * it.
 */
  2468. ip1_flags |= XFS_ICHGTIME_CHG;
  2469. ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
  2470. }
  2471. if (S_ISDIR(VFS_I(ip1)->i_mode)) {
  2472. error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
  2473. dp2->i_ino, first_block,
  2474. dfops, spaceres);
  2475. if (error)
  2476. goto out_trans_abort;
  2477. /* transfer ip1 ".." reference to dp2 */
  2478. if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
  2479. error = xfs_droplink(tp, dp1);
  2480. if (error)
  2481. goto out_trans_abort;
  2482. error = xfs_bumplink(tp, dp2);
  2483. if (error)
  2484. goto out_trans_abort;
  2485. }
/*
 * Although ip2 isn't changed here, userspace needs
 * to be notified about the change, so that applications
 * relying on it (like backup ones) can properly detect
 * it.
 */
  2492. ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
  2493. ip2_flags |= XFS_ICHGTIME_CHG;
  2494. }
  2495. }
  2496. if (ip1_flags) {
  2497. xfs_trans_ichgtime(tp, ip1, ip1_flags);
  2498. xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
  2499. }
  2500. if (ip2_flags) {
  2501. xfs_trans_ichgtime(tp, ip2, ip2_flags);
  2502. xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
  2503. }
  2504. if (dp2_flags) {
  2505. xfs_trans_ichgtime(tp, dp2, dp2_flags);
  2506. xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
  2507. }
  2508. xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
  2509. xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
  2510. return xfs_finish_rename(tp, dfops);
  2511. out_trans_abort:
  2512. xfs_defer_cancel(dfops);
  2513. xfs_trans_cancel(tp);
  2514. return error;
  2515. }
/*
 * xfs_rename_alloc_whiteout()
 *
 * Return a referenced, unlinked, unlocked inode that can be used as a
 * whiteout in a rename transaction. We use a tmpfile inode here so that if
 * we crash between allocating the inode and linking it into the rename
 * transaction, recovery will free the inode and we won't leak it.
 */
  2524. static int
  2525. xfs_rename_alloc_whiteout(
  2526. struct xfs_inode *dp,
  2527. struct xfs_inode **wip)
  2528. {
  2529. struct xfs_inode *tmpfile;
  2530. int error;
  2531. error = xfs_create_tmpfile(dp, NULL, S_IFCHR | WHITEOUT_MODE, &tmpfile);
  2532. if (error)
  2533. return error;
  2534. /*
  2535. * Prepare the tmpfile inode as if it were created through the VFS.
  2536. * Otherwise, the link increment paths will complain about nlink 0->1.
  2537. * Drop the link count as done by d_tmpfile(), complete the inode setup
  2538. * and flag it as linkable.
  2539. */
  2540. drop_nlink(VFS_I(tmpfile));
  2541. xfs_setup_iops(tmpfile);
  2542. xfs_finish_inode_setup(tmpfile);
  2543. VFS_I(tmpfile)->i_state |= I_LINKABLE;
  2544. *wip = tmpfile;
  2545. return 0;
  2546. }
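/*
 * The VFS represents a whiteout as a character device with device
 * number 0, which is what S_IFCHR | WHITEOUT_MODE and the zero rdev
 * used above produce.
 */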
  2547. /*
  2548. * xfs_rename
  2549. */
  2550. int
  2551. xfs_rename(
  2552. struct xfs_inode *src_dp,
  2553. struct xfs_name *src_name,
  2554. struct xfs_inode *src_ip,
  2555. struct xfs_inode *target_dp,
  2556. struct xfs_name *target_name,
  2557. struct xfs_inode *target_ip,
  2558. unsigned int flags)
  2559. {
  2560. struct xfs_mount *mp = src_dp->i_mount;
  2561. struct xfs_trans *tp;
  2562. struct xfs_defer_ops dfops;
  2563. xfs_fsblock_t first_block;
  2564. struct xfs_inode *wip = NULL; /* whiteout inode */
  2565. struct xfs_inode *inodes[__XFS_SORT_INODES];
  2566. int num_inodes = __XFS_SORT_INODES;
  2567. bool new_parent = (src_dp != target_dp);
  2568. bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
  2569. int spaceres;
  2570. int error;
  2571. trace_xfs_rename(src_dp, target_dp, src_name, target_name);
  2572. if ((flags & RENAME_EXCHANGE) && !target_ip)
  2573. return -EINVAL;
  2574. /*
  2575. * If we are doing a whiteout operation, allocate the whiteout inode
  2576. * we will be placing at the target and ensure the type is set
  2577. * appropriately.
  2578. */
  2579. if (flags & RENAME_WHITEOUT) {
  2580. ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
  2581. error = xfs_rename_alloc_whiteout(target_dp, &wip);
  2582. if (error)
  2583. return error;
  2584. /* setup target dirent info as whiteout */
  2585. src_name->type = XFS_DIR3_FT_CHRDEV;
  2586. }
  2587. xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
  2588. inodes, &num_inodes);
  2589. spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
  2590. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
  2591. if (error == -ENOSPC) {
  2592. spaceres = 0;
  2593. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
  2594. &tp);
  2595. }
  2596. if (error)
  2597. goto out_release_wip;
  2598. /*
  2599. * Attach the dquots to the inodes
  2600. */
  2601. error = xfs_qm_vop_rename_dqattach(inodes);
  2602. if (error)
  2603. goto out_trans_cancel;
  2604. /*
  2605. * Lock all the participating inodes. Depending upon whether
  2606. * the target_name exists in the target directory, and
  2607. * whether the target directory is the same as the source
  2608. * directory, we can lock from 2 to 4 inodes.
  2609. */
  2610. xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
  2611. /*
  2612. * Join all the inodes to the transaction. From this point on,
  2613. * we can rely on either trans_commit or trans_cancel to unlock
  2614. * them.
  2615. */
  2616. xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
  2617. if (new_parent)
  2618. xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
  2619. xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
  2620. if (target_ip)
  2621. xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
  2622. if (wip)
  2623. xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
  2624. /*
  2625. * If we are using project inheritance, we only allow renames
  2626. * into our tree when the project IDs are the same; else the
  2627. * tree quota mechanism would be circumvented.
  2628. */
  2629. if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
  2630. (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
  2631. error = -EXDEV;
  2632. goto out_trans_cancel;
  2633. }
  2634. xfs_defer_init(&dfops, &first_block);
  2635. /* RENAME_EXCHANGE is unique from here on. */
  2636. if (flags & RENAME_EXCHANGE)
  2637. return xfs_cross_rename(tp, src_dp, src_name, src_ip,
  2638. target_dp, target_name, target_ip,
  2639. &dfops, &first_block, spaceres);
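/*
 * Note that xfs_cross_rename() consumes the transaction: it either
 * commits it via xfs_finish_rename() or cancels it on error, so
 * there is nothing to unwind here.
 */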
  2640. /*
  2641. * Set up the target.
  2642. */
  2643. if (target_ip == NULL) {
  2644. /*
  2645. * If there's no space reservation, check the entry will
  2646. * fit before actually inserting it.
  2647. */
  2648. if (!spaceres) {
  2649. error = xfs_dir_canenter(tp, target_dp, target_name);
  2650. if (error)
  2651. goto out_trans_cancel;
  2652. }
  2653. /*
  2654. * If target does not exist and the rename crosses
  2655. * directories, adjust the target directory link count
  2656. * to account for the ".." reference from the new entry.
  2657. */
  2658. error = xfs_dir_createname(tp, target_dp, target_name,
  2659. src_ip->i_ino, &first_block,
  2660. &dfops, spaceres);
  2661. if (error)
  2662. goto out_bmap_cancel;
  2663. xfs_trans_ichgtime(tp, target_dp,
  2664. XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
  2665. if (new_parent && src_is_directory) {
  2666. error = xfs_bumplink(tp, target_dp);
  2667. if (error)
  2668. goto out_bmap_cancel;
  2669. }
  2670. } else { /* target_ip != NULL */
  2671. /*
  2672. * If target exists and it's a directory, check that both
  2673. * target and source are directories and that target can be
  2674. * destroyed, or that neither is a directory.
  2675. */
  2676. if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
  2677. /*
  2678. * Make sure target dir is empty.
  2679. */
  2680. if (!(xfs_dir_isempty(target_ip)) ||
  2681. (VFS_I(target_ip)->i_nlink > 2)) {
  2682. error = -EEXIST;
  2683. goto out_trans_cancel;
  2684. }
  2685. }
  2686. /*
  2687. * Link the source inode under the target name.
  2688. * If the source inode is a directory and we are moving
  2689. * it across directories, its ".." entry will be
  2690. * inconsistent until we replace that down below.
  2691. *
  2692. * In case there is already an entry with the same
  2693. * name at the destination directory, remove it first.
  2694. */
  2695. error = xfs_dir_replace(tp, target_dp, target_name,
  2696. src_ip->i_ino,
  2697. &first_block, &dfops, spaceres);
  2698. if (error)
  2699. goto out_bmap_cancel;
  2700. xfs_trans_ichgtime(tp, target_dp,
  2701. XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
  2702. /*
  2703. * Decrement the link count on the target since the target
  2704. * dir no longer points to it.
  2705. */
  2706. error = xfs_droplink(tp, target_ip);
  2707. if (error)
  2708. goto out_bmap_cancel;
  2709. if (src_is_directory) {
  2710. /*
  2711. * Drop the link from the old "." entry.
  2712. */
  2713. error = xfs_droplink(tp, target_ip);
  2714. if (error)
  2715. goto out_bmap_cancel;
  2716. }
  2717. } /* target_ip != NULL */
  2718. /*
  2719. * Remove the source.
  2720. */
  2721. if (new_parent && src_is_directory) {
  2722. /*
  2723. * Rewrite the ".." entry to point to the new
  2724. * directory.
  2725. */
  2726. error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
  2727. target_dp->i_ino,
  2728. &first_block, &dfops, spaceres);
		ASSERT(error != -EEXIST);
		if (error)
			goto out_bmap_cancel;
	}

	/*
	 * We always want to hit the ctime on the source inode.
	 *
	 * This isn't strictly required by the standards since the source
	 * inode isn't really being changed, but old unix file systems did
	 * it and some incremental backup programs won't work without it.
	 */
	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);

	/*
	 * Adjust the link count on src_dp.  This is necessary when
	 * renaming a directory, either within one parent when
	 * the target existed, or across two parent directories.
	 */
	if (src_is_directory && (new_parent || target_ip != NULL)) {
		/*
		 * Decrement link count on src_directory since the
		 * entry that's moved no longer points to it.
		 */
		error = xfs_droplink(tp, src_dp);
		if (error)
			goto out_bmap_cancel;
	}

	/*
	 * For whiteouts, we only need to update the source dirent with the
	 * inode number of the whiteout inode rather than removing it
	 * altogether.
	 */
	if (wip) {
		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
					&first_block, &dfops, spaceres);
	} else
		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
					   &first_block, &dfops, spaceres);
	if (error)
		goto out_bmap_cancel;

	/*
	 * For whiteouts, we need to bump the link count on the whiteout inode.
	 * This means that failures all the way up to this point leave the inode
	 * on the unlinked list and so cleanup is a simple matter of dropping
	 * the remaining reference to it.  If we fail here after bumping the link
	 * count, we're shutting down the filesystem so we'll never see the
	 * intermediate state on disk.
	 */
	if (wip) {
		ASSERT(VFS_I(wip)->i_nlink == 0);
		error = xfs_bumplink(tp, wip);
		if (error)
			goto out_bmap_cancel;
		error = xfs_iunlink_remove(tp, wip);
		if (error)
			goto out_bmap_cancel;
		xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);

		/*
		 * Now we have a real link, clear the "I'm a tmpfile" state
		 * flag from the inode so it doesn't accidentally get misused
		 * in future.
		 */
		VFS_I(wip)->i_state &= ~I_LINKABLE;
	}

	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
	if (new_parent)
		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);

	error = xfs_finish_rename(tp, &dfops);
	if (wip)
		IRELE(wip);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
out_release_wip:
	if (wip)
		IRELE(wip);
	return error;
}
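
/*
 * Try to flush other dirty inodes that share the backing buffer with the
 * inode being flushed, so that a single buffer write pushes a whole
 * cluster of inodes to disk.  This is best-effort: inodes that cannot be
 * locked without blocking are simply skipped.
 */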
STATIC int
xfs_iflush_cluster(
	struct xfs_inode	*ip,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	unsigned long		first_index, mask;
	unsigned long		inodes_per_cluster;
	int			cilist_size;
	struct xfs_inode	**cilist;
	struct xfs_inode	*cip;
	int			nr_found;
	int			clcount = 0;
	int			bufwasdelwri;
	int			i;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));

	inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
	cilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
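	/*
	 * The lookup list is only an optimisation; if its allocation fails
	 * we just skip the clustering and flush the one inode we came for.
	 */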
	cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
	if (!cilist)
		goto out_put;
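	/*
	 * Round our inode down to the first inode of its cluster; e.g. with
	 * 8 inodes per cluster the mask is ~7, so aginos 0-7 all yield a
	 * first_index of 0.
	 */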
	mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
	rcu_read_lock();
	/* really need a gang lookup range call here */
	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
					first_index, inodes_per_cluster);
	if (nr_found == 0)
		goto out_free;

	for (i = 0; i < nr_found; i++) {
		cip = cilist[i];
		if (cip == ip)
			continue;

		/*
		 * because this is an RCU protected lookup, we could find a
		 * recently freed or even reallocated inode during the lookup.
		 * We need to check under the i_flags_lock for a valid inode
		 * here.  Skip it if it is not valid or the wrong inode.
		 */
		spin_lock(&cip->i_flags_lock);
		if (!cip->i_ino ||
		    __xfs_iflags_test(cip, XFS_ISTALE)) {
			spin_unlock(&cip->i_flags_lock);
			continue;
		}

		/*
		 * Once we fall off the end of the cluster, no point checking
		 * any more inodes in the list because they will also all be
		 * outside the cluster.
		 */
		if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
			spin_unlock(&cip->i_flags_lock);
			break;
		}
		spin_unlock(&cip->i_flags_lock);

		/*
		 * Do an un-protected check to see if the inode is dirty and
		 * is a candidate for flushing.  These checks will be repeated
		 * later after the appropriate locks are acquired.
		 */
		if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
			continue;

		/*
		 * Try to get locks.  If any are unavailable or it is pinned,
		 * then this inode cannot be flushed and is skipped.
		 */
		if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
			continue;
		if (!xfs_iflock_nowait(cip)) {
			xfs_iunlock(cip, XFS_ILOCK_SHARED);
			continue;
		}
		if (xfs_ipincount(cip)) {
			xfs_ifunlock(cip);
			xfs_iunlock(cip, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * Check the inode number again, just to be certain we are not
		 * racing with freeing in xfs_reclaim_inode().  See the comments
		 * in that function for more information as to why the initial
		 * check is not sufficient.
		 */
		if (!cip->i_ino) {
			xfs_ifunlock(cip);
			xfs_iunlock(cip, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * arriving here means that this inode can be flushed.  First
		 * re-check that it's dirty before flushing.
		 */
		if (!xfs_inode_clean(cip)) {
			int	error;

			error = xfs_iflush_int(cip, bp);
			if (error) {
				xfs_iunlock(cip, XFS_ILOCK_SHARED);
				goto cluster_corrupt_out;
			}
			clcount++;
		} else {
			xfs_ifunlock(cip);
		}
		xfs_iunlock(cip, XFS_ILOCK_SHARED);
	}

	if (clcount) {
		XFS_STATS_INC(mp, xs_icluster_flushcnt);
		XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
	}

out_free:
	rcu_read_unlock();
	kmem_free(cilist);
out_put:
	xfs_perag_put(pag);
	return 0;

cluster_corrupt_out:
	/*
	 * Corruption detected in the clustering loop.  Invalidate the
	 * inode buffer and shut down the filesystem.
	 */
	rcu_read_unlock();
	/*
	 * Clean up the buffer.  If it was delwri, just release it --
	 * brelse can handle it with no problems.  If not, shut down the
	 * filesystem before releasing the buffer.
	 */
	bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
	if (bufwasdelwri)
		xfs_buf_relse(bp);

	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

	if (!bufwasdelwri) {
		/*
		 * Just like incore_relse: if we have b_iodone functions,
		 * mark the buffer as an error and call them.  Otherwise
		 * mark it as stale and brelse.
		 */
		if (bp->b_iodone) {
			bp->b_flags &= ~XBF_DONE;
			xfs_buf_stale(bp);
			xfs_buf_ioerror(bp, -EIO);
			xfs_buf_ioend(bp);
		} else {
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
		}
	}

	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(cip, false);
	kmem_free(cilist);
	xfs_perag_put(pag);
	return -EFSCORRUPTED;
}

/*
 * Flush dirty inode metadata into the backing buffer.
 *
 * The caller must have the inode lock and the inode flush lock held.  The
 * inode lock will still be held upon return to the caller, and the inode
 * flush lock will be released after the inode has reached the disk.
 *
 * The caller must write out the buffer returned in *bpp and release it.
 */
int
xfs_iflush(
	struct xfs_inode	*ip,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buf		*bp = NULL;
	struct xfs_dinode	*dip;
	int			error;

	XFS_STATS_INC(mp, xs_iflush_count);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
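	/*
	 * A btree format data fork implies more extents than fit inline in
	 * the inode; with fewer extents it would have been converted back
	 * to extents format.
	 */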
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));

	*bpp = NULL;

	xfs_iunpin_wait(ip);

	/*
	 * For stale inodes we cannot rely on the backing buffer remaining
	 * stale in cache for the remaining life of the stale inode and so
	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
	 * inodes.  We have to check this after ensuring the inode is unpinned
	 * so that it is safe to reclaim the stale inode after the flush call.
	 */
	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_ifunlock(ip);
		return 0;
	}

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly.  If that's the case we must not write this inode
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto abort_out;
	}

	/*
	 * Get the buffer containing the on-disk inode.  We are doing a try-lock
	 * operation here, so we may get an EAGAIN error.  In that case, we
	 * simply want to return with the inode still dirty.
	 *
	 * If we get any other error, we effectively have a corruption situation
	 * and we cannot flush the inode, so we treat it the same as failing
	 * xfs_iflush_int().
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
			       0);
	if (error == -EAGAIN) {
		xfs_ifunlock(ip);
		return error;
	}
	if (error)
		goto corrupt_out;

	/*
	 * First flush out the inode that xfs_iflush was called with.
	 */
	error = xfs_iflush_int(ip, bp);
	if (error)
		goto corrupt_out;

	/*
	 * If the buffer is pinned then push on the log now so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);

	/*
	 * inode clustering:
	 * see if other inodes can be gathered into this write
	 */
	error = xfs_iflush_cluster(ip, bp);
	if (error)
		goto cluster_corrupt_out;

	*bpp = bp;
	return 0;

corrupt_out:
	if (bp)
		xfs_buf_relse(bp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
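	/*
	 * By the time we get here the buffer has been released and the
	 * filesystem shut down, either by the corrupt_out path above or
	 * inside xfs_iflush_cluster(), so only the flush abort remains.
	 */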
	error = -EFSCORRUPTED;
abort_out:
	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(ip, false);
	return error;
}

STATIC int
xfs_iflush_int(
	struct xfs_inode	*ip,
	struct xfs_buf		*bp)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;
	struct xfs_dinode	*dip;
	struct xfs_mount	*mp = ip->i_mount;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
	ASSERT(iip != NULL && iip->ili_fields != 0);
	ASSERT(ip->i_d.di_version > 1);
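	/*
	 * (v1 inodes are converted to v2 format as they are read in, so
	 * only v2 and v3 inodes should ever reach this point.)
	 */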

	/* set *dip = inode's place in the buffer */
	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);

	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
			   mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
		goto corrupt_out;
	}
	if (S_ISREG(VFS_I(ip)->i_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad regular inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad directory inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	}
	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
				XFS_RANDOM_IFLUSH_5)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: detected corrupt incore inode %Lu, "
			"total extents = %d, nblocks = %Ld, ptr 0x%p",
			__func__, ip->i_ino,
			ip->i_d.di_nextents + ip->i_d.di_anextents,
			ip->i_d.di_nblocks, ip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
		goto corrupt_out;
	}

	/*
	 * Inode item log recovery for v2 inodes is dependent on the
	 * di_flushiter count for correct sequencing.  We bump the flush
	 * iteration count so we can detect flushes which postdate a log record
	 * during recovery.  This is redundant as we now log every change and
	 * hence this can't happen but we still need to do it to ensure
	 * backwards compatibility with old kernels that predate logging all
	 * inode changes.
	 */
	if (ip->i_d.di_version < 3)
		ip->i_d.di_flushiter++;

	/*
	 * Copy the dirty parts of the inode into the on-disk inode.  We always
	 * copy out the core of the inode, because if the inode is dirty at all
	 * the core must be.
	 */
	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);

	/* Wrap, we never let the log put out DI_MAX_FLUSH */
	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
		ip->i_d.di_flushiter = 0;

	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
	if (XFS_IFORK_Q(ip))
		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
	xfs_inobp_check(mp, bp);

	/*
	 * We've recorded everything logged in the inode, so we'd like to clear
	 * the ili_fields bits so we don't log and flush things unnecessarily.
	 * However, we can't stop logging all this information until the data
	 * we've copied into the disk buffer is written to disk.  If we did we
	 * might overwrite the copy of the inode in the log with all the data
	 * after re-logging only part of it, and in the face of a crash we
	 * wouldn't have all the data we need to recover.
	 *
	 * What we do is move the bits to the ili_last_fields field.  When
	 * logging the inode, these bits are moved back to the ili_fields field.
	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
	 * know that the information those bits represent is permanently on
	 * disk.  As long as the flush completes before the inode is logged
	 * again, then both ili_fields and ili_last_fields will be cleared.
	 *
	 * We can play with the ili_fields bits here, because the inode lock
	 * must be held exclusively in order to set bits there and the flush
	 * lock protects the ili_last_fields bits.  Set ili_logged so the flush
	 * done routine can tell whether or not to look in the AIL.  Also, store
	 * the current LSN of the inode so that we can tell whether the item has
	 * moved in the AIL from xfs_iflush_done().  In order to read the lsn we
	 * need the AIL lock, because it is a 64 bit value that cannot be read
	 * atomically.
	 */
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	iip->ili_fsync_fields = 0;
	iip->ili_logged = 1;

	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
				&iip->ili_item.li_lsn);

	/*
	 * Attach the function xfs_iflush_done to the inode's
	 * buffer.  This will remove the inode from the AIL
	 * and unlock the inode's flush lock when the inode is
	 * completely written to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);

	/* generate the checksum. */
	xfs_dinode_calc_crc(mp, dip);

	ASSERT(bp->b_fspriv != NULL);
	ASSERT(bp->b_iodone != NULL);
	return 0;

corrupt_out:
	return -EFSCORRUPTED;
}