xfs_inode.c

/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_attr_sf.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iunlink_remove(xfs_trans_t *, xfs_inode_t *);

/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
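
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): a caller that only wants to walk the extent list takes whichever
 * lock mode the helper chose and must hand that same mode back to
 * xfs_iunlock():
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_data_map_shared(ip);
 *	... read the data fork extent list ...
 *	xfs_iunlock(ip, lock_mode);
 */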
/*
 * The xfs inode contains 3 multi-reader locks: the i_iolock, the i_mmap_lock
 * and the i_lock.  This routine allows various combinations of the locks to
 * be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_iolock -> i_mmap_lock -> page_lock -> i_ilock
 *
 * mmap_sem locking order:
 *
 * i_iolock -> page lock -> mmap_sem
 * mmap_sem -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_sem locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
 * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
 * in get_user_pages() to map the user pages into the kernel address space for
 * direct IO. Similarly the i_iolock cannot be taken inside a page fault because
 * page faults already hold the mmap_sem.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_iolock and the i_mmap_lock. These locks should *only* be both
 * taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
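
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * an operation that must invalidate the page cache race-free, such as a
 * truncate-style extent manipulation, takes both the IO and mmap locks in
 * the documented order and releases them with the same flags:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	... invalidate page cache, manipulate extents ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 */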
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be locked.
 *	See the comment for xfs_ilock() for a list of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_mmaplock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_mmaplock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
out:
	return 0;
}
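
/*
 * Illustrative trylock pattern (editor's addition, not part of the original
 * source): a caller that must not block, for example while already holding
 * other locks, backs off and retries instead of sleeping:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		return -EAGAIN;		(caller is expected to retry later)
 */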
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be unlocked.
 *	See the comment for xfs_ilock() for a list of valid values for
 *	this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrdemote(&ip->i_mmaplock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
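
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * demotion lets a caller finish an exclusive modification phase and then
 * continue under a shared lock without a drop/reacquire window; the final
 * unlock must use the shared flag:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... modify in-core state ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... other readers may now share the lock ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */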
#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
			return !!ip->i_mmaplock.mr_writer;
		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
#endif

#ifdef DEBUG
int xfs_locked_n;
int xfs_small_retries;
int xfs_middle_retries;
int xfs_lots_retries;
int xfs_lock_delays;
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif
/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		ASSERT(xfs_lockdep_subclass_ok(subclass +
						XFS_IOLOCK_PARENT_VAL));
		class += subclass << XFS_IOLOCK_SHIFT;
		if (lock_mode & XFS_IOLOCK_PARENT)
			class += XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}
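
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the returned value is still an ordinary lock_mode, with the subclass
 * encoded into the per-lock bits of the flags word, so it can be passed
 * straight to xfs_ilock().  For the ilock:
 *
 *	int mode = xfs_lock_inumorder(XFS_ILOCK_EXCL, 2);
 *	xfs_ilock(ip, mode);
 *
 * which is equivalent to XFS_ILOCK_EXCL | (2 << XFS_ILOCK_SHIFT).
 */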
/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
void
xfs_lock_inodes(
	xfs_inode_t	**ips,
	int		inodes,
	uint		lock_mode)
{
	int		attempts = 0, i, j, try_lock;
	xfs_log_item_t	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking. We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_IOLOCK_EXCL) ||
		inodes <= XFS_IOLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = (xfs_log_item_t *)ips[j]->i_itemp;
				if (lp && (lp->li_flags & XFS_LI_IN_AIL))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
#ifdef DEBUG
			xfs_lock_delays++;
#endif
		}
		i = 0;
		try_lock = 0;
		goto again;
	}

#ifdef DEBUG
	if (attempts) {
		if (attempts < 5)
			xfs_small_retries++;
		else if (attempts < 100)
			xfs_middle_retries++;
		else
			xfs_lots_retries++;
	} else {
		xfs_locked_n++;
	}
#endif
}
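
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a caller supplies its distinct inodes already sorted by i_ino, locks them
 * as a batch with a single lock type, and later unlocks each one with the
 * same lock type:
 *
 *	xfs_inode_t	*ips[2] = { dp, ip };	(already in i_ino order)
 *
 *	xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
 *	... operate on both inodes ...
 *	xfs_iunlock(dp, XFS_ILOCK_EXCL);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */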
/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
void
xfs_lock_two_inodes(
	xfs_inode_t		*ip0,
	xfs_inode_t		*ip1,
	uint			lock_mode)
{
	xfs_inode_t		*temp;
	int			attempts = 0;
	xfs_log_item_t		*lp;

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
		ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	} else if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
		ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
	}

again:
	xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = (xfs_log_item_t *)ip0->i_itemp;
	if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
			xfs_iunlock(ip0, lock_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
	}
}

void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wait);
}
STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags,
	uint64_t		di_flags2,
	bool			has_attr)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (di_flags2 & XFS_DIFLAG2_ANY) {
		if (di_flags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
	}

	if (has_attr)
		flags |= FS_XFLAG_HASATTR;

	return flags;
}

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	struct xfs_icdinode	*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
}

uint
xfs_dic2xflags(
	struct xfs_dinode	*dip)
{
	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags),
				be64_to_cpu(dip->di_flags2), XFS_DFORK_Q(dip));
}
/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	xfs_ilock(dp, XFS_IOLOCK_SHARED);
	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
	*ipp = NULL;
	return error;
}
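
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a case-insensitive caller passes a ci_name and must free the returned
 * match when one is set; an exact match leaves ci_name.name NULL:
 *
 *	struct xfs_name	ci_name;
 *	xfs_inode_t	*ip;
 *	int		error;
 *
 *	error = xfs_lookup(dp, &xname, &ip, &ci_name);
 *	if (!error && ci_name.name)
 *		kmem_free(ci_name.name);
 */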
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct xfs_mount *mp = tp->t_mountp;
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	struct timespec	tv;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);

	/*
	 * We always convert v1 inodes to v2 now - we only support filesystems
	 * with >= v2 inode capability, so there is no reason for ever leaving
	 * an inode in v1 format.
	 */
	if (ip->i_d.di_version == 1)
		ip->i_d.di_version = 2;

	ip->i_d.di_mode = mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
	ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
	xfs_set_projid(ip, prid);
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode))
			ip->i_d.di_mode |= S_ISGID;
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid)))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	tv = current_fs_time(mp->m_super);
	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	ip->i_d.di_atime = ip->i_d.di_mtime;
	ip->i_d.di_ctime = ip->i_d.di_mtime;

	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (ip->i_d.di_version == 3) {
		ASSERT(ip->i_d.di_ino == ino);
		ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_meta_uuid));
		ip->i_d.di_crc = 0;
		ip->i_d.di_changecount = 1;
		ip->i_d.di_lsn = 0;
		ip->i_d.di_flags2 = 0;
		memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2));
		ip->i_d.di_crtime = ip->i_d.di_mtime;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint64_t	di_flags2 = 0;
			uint		di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
				if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
					di_flags |= XFS_DIFLAG_PROJINHERIT;
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
				di_flags2 |= XFS_DIFLAG2_DAX;

			ip->i_d.di_flags |= di_flags;
			ip->i_d.di_flags2 |= di_flags2;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}
/*
 * Allocates a new inode from disk and returns a pointer to the
 * incore copy. This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * xfs_create_dir.
 */
int
xfs_dir_ialloc(
	xfs_trans_t	**tpp,		/* input: current transaction;
					   output: may be a new transaction. */
	xfs_inode_t	*dp,		/* directory within which to allocate
					   the inode. */
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,		/* project id */
	int		okalloc,	/* ok to allocate new space */
	xfs_inode_t	**ipp,		/* pointer to inode; it will be
					   locked. */
	int		*committed)
{
	xfs_trans_t	*tp;
	xfs_inode_t	*ip;
	xfs_buf_t	*ialloc_context = NULL;
	int		code;
	void		*dqinfo;
	uint		tflags;

	tp = *tpp;
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	/*
	 * xfs_ialloc will return a pointer to an incore inode if
	 * the Space Manager has an available inode on the free
	 * list. Otherwise, it will do an allocation and replenish
	 * the freelist.  Since we can only do one allocation per
	 * transaction without deadlocks, we will need to commit the
	 * current transaction and start a new one.  We will then
	 * need to call xfs_ialloc again to get the inode.
	 *
	 * If xfs_ialloc did an allocation to replenish the freelist,
	 * it returns the bp containing the head of the freelist as
	 * ialloc_context.  We will hold a lock on it across the
	 * transaction commit so that no other process can steal
	 * the inode(s) that we've just allocated.
	 */
	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
			  &ialloc_context, &ip);

	/*
	 * Return an error if we were unable to allocate a new inode.
	 * This should only happen if we run out of space on disk or
	 * encounter a disk error.
	 */
	if (code) {
		*ipp = NULL;
		return code;
	}
	if (!ialloc_context && !ip) {
		*ipp = NULL;
		return -ENOSPC;
	}

	/*
	 * If the AGI buffer is non-NULL, then we were unable to get an
	 * inode in one operation.  We need to commit the current
	 * transaction and call xfs_ialloc() again.  It is guaranteed
	 * to succeed the second time.
	 */
	if (ialloc_context) {
		/*
		 * Normally, xfs_trans_commit releases all the locks.
		 * We call bhold to hang on to the ialloc_context across
		 * the commit.  Holding this buffer prevents any other
		 * processes from doing any allocations in this
		 * allocation group.
		 */
		xfs_trans_bhold(tp, ialloc_context);

		/*
		 * We want the quota changes to be associated with the next
		 * transaction, NOT this one. So, detach the dqinfo from this
		 * and attach it to the next transaction.
		 */
		dqinfo = NULL;
		tflags = 0;
		if (tp->t_dqinfo) {
			dqinfo = (void *)tp->t_dqinfo;
			tp->t_dqinfo = NULL;
			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
		}

		code = xfs_trans_roll(&tp, 0);
		if (committed != NULL)
			*committed = 1;

		/*
		 * Re-attach the quota info that we detached from prev trx.
		 */
		if (dqinfo) {
			tp->t_dqinfo = dqinfo;
			tp->t_flags |= tflags;
		}

		if (code) {
			xfs_buf_relse(ialloc_context);
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		xfs_trans_bjoin(tp, ialloc_context);

		/*
		 * Call ialloc again. Since we've locked out all
		 * other allocations in this allocation group,
		 * this call should always succeed.
		 */
		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
				  okalloc, &ialloc_context, &ip);

		/*
		 * If we get an error at this point, return to the caller
		 * so that the current transaction can be aborted.
		 */
		if (code) {
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		ASSERT(!ialloc_context && ip);

	} else {
		if (committed != NULL)
			*committed = 0;
	}

	*ipp = ip;
	*tpp = tp;

	return 0;
}
/*
 * Decrement the link count on an inode & log the change.
 * If this causes the link count to go to zero, initiate the
 * logging activity required to truncate a file.
 */
int				/* error */
xfs_droplink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	int		error;

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	ASSERT(ip->i_d.di_nlink > 0);
	ip->i_d.di_nlink--;
	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = 0;
	if (ip->i_d.di_nlink == 0) {
		/*
		 * We're dropping the last link to this file.
		 * Move the on-disk inode to the AGI unlinked list.
		 * From xfs_inactive() we will pull the inode from
		 * the list and free it.
		 */
		error = xfs_iunlink(tp, ip);
	}
	return error;
}

/*
 * Increment the link count on an inode & log the change.
 */
int
xfs_bumplink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	ASSERT(ip->i_d.di_version > 1);
	ASSERT(ip->i_d.di_nlink > 0 || (VFS_I(ip)->i_state & I_LINKABLE));
	ip->i_d.di_nlink++;
	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	return 0;
}
int
xfs_create(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	umode_t			mode,
	xfs_dev_t		rdev,
	xfs_inode_t		**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	bool			unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	trace_xfs_create(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
					xfs_kgid_to_gid(current_fsgid()), prid,
					XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
					&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		rdev = 0;
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
		tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
		tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_reserve(tp, tres, resblks, 0);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_reserve(tp, tres, resblks, 0);
	}
	if (error == -ENOSPC) {
		/* No space at all so try a "no-allocation" reservation */
		resblks = 0;
		error = xfs_trans_reserve(tp, tres, 0, 0);
	}
	if (error)
		goto out_trans_cancel;

	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
		      XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	xfs_bmap_init(&free_list, &first_block);

	/*
	 * Reserve disk quota and the inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	if (!resblks) {
		error = xfs_dir_canenter(tp, dp, name);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to them, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
			       prid, resblks > 0, &ip, NULL);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dir_ialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
				   &first_block, &free_list, resblks ?
					resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_bmap_cancel;

		error = xfs_bumplink(tp, dp);
		if (error)
			goto out_bmap_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The IDs of the inode cannot have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_bmap_finish(&tp, &free_list, NULL);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
out_trans_cancel:
	xfs_trans_cancel(tp);
out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		IRELE(ip);
	}

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	return error;
}
int
xfs_create_tmpfile(
	struct xfs_inode	*dp,
	struct dentry		*dentry,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
				   xfs_kgid_to_gid(current_fsgid()), prid,
				   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
				   &udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE_TMPFILE);

	tres = &M_RES(mp)->tr_create_tmpfile;
	error = xfs_trans_reserve(tp, tres, resblks, 0);
	if (error == -ENOSPC) {
		/* No space at all so try a "no-allocation" reservation */
		resblks = 0;
		error = xfs_trans_reserve(tp, tres, 0, 0);
	}
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
					pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	error = xfs_dir_ialloc(&tp, dp, mode, 1, 0,
			       prid, resblks > 0, &ip, NULL);
	if (error)
		goto out_trans_cancel;

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The IDs of the inode cannot have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	ip->i_d.di_nlink--;
	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode. This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		IRELE(ip);
	}

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}
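/*
 * Note (illustrative, not part of the original source): the tmpfile created
 * above is born with a zero link count and is parked on the AGI unlinked
 * list, so log recovery will free it if we crash before it is ever linked.
 * There are two exits from this state: xfs_inactive() frees the inode when
 * the last reference is dropped, or a later link pulls it back off the
 * unlinked list, as in xfs_link() below:
 *
 *	if (sip->i_d.di_nlink == 0) {
 *		error = xfs_iunlink_remove(tp, sip);
 *		...
 *	}
 */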
int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(sip->i_d.di_mode));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip, 0);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp, 0);
	if (error)
		goto std_return;

	tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, resblks, 0);
	if (error == -ENOSPC) {
		resblks = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, 0, 0);
	}
	if (error)
		goto error_return;

	xfs_ilock(tdp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
	xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, tdp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
		error = -EXDEV;
		goto error_return;
	}

	if (!resblks) {
		error = xfs_dir_canenter(tp, tdp, target_name);
		if (error)
			goto error_return;
	}

	xfs_bmap_init(&free_list, &first_block);

	if (sip->i_d.di_nlink == 0) {
		error = xfs_iunlink_remove(tp, sip);
		if (error)
			goto error_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
				   &first_block, &free_list, resblks);
	if (error)
		goto error_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	error = xfs_bumplink(tp, sip);
	if (error)
		goto error_return;

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	error = xfs_bmap_finish(&tp, &free_list, NULL);
	if (error) {
		xfs_bmap_cancel(&free_list);
		goto error_return;
	}

	return xfs_trans_commit(tp);

 error_return:
	xfs_trans_cancel(tp);
 std_return:
	return error;
}
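/*
 * Note (illustrative): the reserve-then-retry sequence above is the standard
 * idiom in this file for degrading gracefully under ENOSPC. We first ask for
 * a full block reservation, and if that fails we fall back to a
 * "no-allocation" reservation of zero blocks:
 *
 *	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, resblks, 0);
 *	if (error == -ENOSPC) {
 *		resblks = 0;
 *		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, 0, 0);
 *	}
 *
 * With resblks == 0 the directory code must not allocate, which is why the
 * callers then use xfs_dir_canenter() to check that the new entry fits in
 * the existing directory blocks before attempting the insert.
 */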
/*
 * Free up the underlying blocks past new_size. The new size must be smaller
 * than the current size. This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here. A transaction will be
 * returned to the caller to be committed. The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction. On return the inode
 * will be "held" within the returned transaction. This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not. We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		first_unmap_block;
	xfs_fileoff_t		last_block;
	xfs_filblks_t		unmap_len;
	int			error = 0;
	int			done = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size. If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (first_unmap_block == last_block)
		return 0;

	ASSERT(first_unmap_block < last_block);
	unmap_len = last_block - first_unmap_block + 1;
	while (!done) {
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(tp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(whichfork),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    &done);
		if (error)
			goto out_bmap_cancel;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(&tp, &free_list, ip);
		if (error)
			goto out_bmap_cancel;

		error = xfs_trans_roll(&tp, ip);
		if (error)
			goto out;
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
out_bmap_cancel:
	/*
	 * If the bunmapi call encounters an error, return to the caller where
	 * the transaction can be properly aborted. We just need to make sure
	 * we're not holding any resources that we were not when we came in.
	 */
	xfs_bmap_cancel(&free_list);
	goto out;
}
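/*
 * Illustrative caller pattern for the routine above (a sketch only; see
 * xfs_inactive_truncate() below for the real thing):
 *
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
 *	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	...
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *
 * The transaction pointer is passed by reference because the internal
 * xfs_trans_roll() loop may hand back a different transaction than the one
 * the caller passed in.
 */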
int
xfs_release(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error;

	if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
		return 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	if (!XFS_FORCED_SHUTDOWN(mp)) {
		int truncated;

		/*
		 * If we previously truncated this file and removed old data
		 * in the process, we want to initiate "early" writeout on
		 * the last close. This is an attempt to combat the notorious
		 * NULL files problem which is particularly noticeable from a
		 * truncate down, buffered (re-)write (delalloc), followed by
		 * a crash. What we are effectively doing here is
		 * significantly reducing the time window where we'd otherwise
		 * be exposed to that problem.
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated) {
			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
			if (ip->i_delayed_blks > 0) {
				error = filemap_flush(VFS_I(ip)->i_mapping);
				if (error)
					return error;
			}
		}
	}

	if (ip->i_d.di_nlink == 0)
		return 0;

	if (xfs_can_free_eofblocks(ip, false)) {

		/*
		 * If we can't get the iolock just skip truncating the blocks
		 * past EOF because we could deadlock with the mmap_sem
		 * otherwise. We'll get another chance to drop them once the
		 * last reference to the inode is dropped, so we'll never leak
		 * blocks permanently.
		 *
		 * Further, if the inode is being opened, written and closed
		 * frequently and we have delayed allocation blocks outstanding
		 * (e.g. streaming writes from the NFS server), truncating the
		 * blocks past EOF will cause fragmentation.
		 *
		 * In this case don't do the truncation either, but we have to
		 * be careful how we detect this case. Blocks beyond EOF show
		 * up as i_delayed_blks even when the inode is clean, so we
		 * need to truncate them away first before checking for a dirty
		 * release. Hence on the first dirty close we will still remove
		 * the speculative allocation, but after that we will leave it
		 * in place.
		 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			return 0;

		error = xfs_free_eofblocks(mp, ip, true);
		if (error && error != -EAGAIN)
			return error;

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}
	return 0;
}
/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_trans_cancel(tp);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes. See the related
	 * comment in xfs_setattr_size() for details.
	 */
	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_d.di_nextents == 0);

	error = xfs_trans_commit(tp);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * xfs_inactive_ifree()
 *
 * Perform the inode free when an inode is unlinked.
 */
STATIC int
xfs_inactive_ifree(
	struct xfs_inode *ip)
{
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

	/*
	 * The ifree transaction might need to allocate blocks for record
	 * insertion to the finobt. We don't want to fail here at ENOSPC, so
	 * allow ifree to dip into the reserved block pool if necessary.
	 *
	 * Freeing large sets of inodes generally means freeing inode chunks,
	 * directory and file data blocks, so this should be relatively safe.
	 * Only under severe circumstances should it be possible to free enough
	 * inodes to exhaust the reserve block pool via finobt expansion while
	 * at the same time not creating free space in the filesystem.
	 *
	 * Send a warning if the reservation does happen to fail, as the inode
	 * now remains allocated and sits on the unlinked list until the fs is
	 * repaired.
	 */
	tp->t_flags |= XFS_TRANS_RESERVE;
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree,
				  XFS_IFREE_SPACE_RES(mp), 0);
	if (error) {
		if (error == -ENOSPC) {
			xfs_warn_ratelimited(mp,
			"Failed to remove inode(s) from unlinked list. "
			"Please free space, unmount and run xfs_repair.");
		} else {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
		}
		xfs_trans_cancel(tp);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	xfs_bmap_init(&free_list, &first_block);
	error = xfs_ifree(tp, ip, &free_list);
	if (error) {
		/*
		 * If we fail to free the inode, shut down. The cancel
		 * might do that; we need to make sure. Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		return error;
	}

	/*
	 * Credit the quota account(s). The inode is gone.
	 */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

	/*
	 * Just ignore errors at this point. There is nothing we can do except
	 * to try to keep going. Make sure it's not a silent error.
	 */
	error = xfs_bmap_finish(&tp, &free_list, NULL);
	if (error) {
		xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
			__func__, error);
		xfs_bmap_cancel(&free_list);
	}
	error = xfs_trans_commit(tp);
	if (error)
		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
			__func__, error);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}
/*
 * xfs_inactive
 *
 * This is called when the vnode reference count goes to zero. If the
 * file has been unlinked, then it must now be truncated. Also, we clear
 * all of the read-ahead state kept for the inode here since the file is
 * now closed.
 */
void
xfs_inactive(
	xfs_inode_t	*ip)
{
	struct xfs_mount	*mp;
	int			error;
	int			truncate = 0;

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (ip->i_d.di_mode == 0) {
		ASSERT(ip->i_df.if_real_bytes == 0);
		ASSERT(ip->i_df.if_broot_bytes == 0);
		return;
	}

	mp = ip->i_mount;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return;

	if (ip->i_d.di_nlink != 0) {
		/*
		 * force is true because we are evicting an inode from the
		 * cache. Post-eof blocks must be freed, lest we end up with
		 * broken free space accounting.
		 */
		if (xfs_can_free_eofblocks(ip, true))
			xfs_free_eofblocks(mp, ip, false);
		return;
	}

	if (S_ISREG(ip->i_d.di_mode) &&
	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
	     ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
		truncate = 1;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return;

	if (S_ISLNK(ip->i_d.di_mode))
		error = xfs_inactive_symlink(ip);
	else if (truncate)
		error = xfs_inactive_truncate(ip);
	if (error)
		return;

	/*
	 * If there are attributes associated with the file then blow them away
	 * now. The code calls a routine that recursively deconstructs the
	 * attribute fork. It also blows away the in-core attribute fork.
	 */
	if (XFS_IFORK_Q(ip)) {
		error = xfs_attr_inactive(ip);
		if (error)
			return;
	}

	ASSERT(!ip->i_afp);
	ASSERT(ip->i_d.di_anextents == 0);
	ASSERT(ip->i_d.di_forkoff == 0);

	/*
	 * Free the inode.
	 */
	error = xfs_inactive_ifree(ip);
	if (error)
		return;

	/*
	 * Release the dquots held by inode, if any.
	 */
	xfs_qm_dqdetach(ip);
}
/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI. It
 * will be pulled from this list when the inode is freed.
 */
int
xfs_iunlink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agino_t	agino;
	short		bucket_index;
	int		offset;
	int		error;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	mp = tp->t_mountp;

	/*
	 * Get the agi buffer first. It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
	if (error)
		return error;
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to. Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error)
			return error;

		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_imap.im_boffset +
			offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, dip);

		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);
	}

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	ASSERT(agino != 0);
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
	return 0;
}
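/*
 * On-disk shape of one AGI unlinked bucket, for reference (a sketch):
 *
 *	agi_unlinked[agino % XFS_AGI_UNLINKED_BUCKETS] --> inode A
 *		A.di_next_unlinked --> inode B
 *		B.di_next_unlinked --> NULLAGINO
 *
 * Insertion above is push-to-front: the new inode's di_next_unlinked takes
 * the old bucket head, then the bucket head is pointed at the new inode.
 * Removal (below) is the usual singly-linked-list unlink, which is why
 * xfs_iunlink_remove() may have to walk the chain to find the predecessor
 * when the inode being removed is not at the head.
 */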
/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
STATIC int
xfs_iunlink_remove(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_ino_t	next_ino;
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agnumber_t	agno;
	xfs_agino_t	agino;
	xfs_agino_t	next_agino;
	xfs_buf_t	*last_ibp;
	xfs_dinode_t	*last_dip = NULL;
	short		bucket_index;
	int		offset, last_offset = 0;
	int		error;

	mp = tp->t_mountp;
	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);

	/*
	 * Get the agi buffer first. It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		return error;
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
	ASSERT(agi->agi_unlinked[bucket_index]);

	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
		/*
		 * We're at the head of the list. Get the inode's on-disk
		 * buffer to see if there is anyone after us on the list.
		 * Only modify our next pointer if it is not already NULLAGINO.
		 * This saves us the overhead of dealing with the buffer when
		 * there is no need to change it.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the bucket head pointer at the next inode.
		 */
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
		offset = offsetof(xfs_agi_t, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
		xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
		xfs_trans_log_buf(tp, agibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
	} else {
		/*
		 * We need to search the list for the inode being freed.
		 */
		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
		last_ibp = NULL;
		while (next_agino != agino) {
			struct xfs_imap	imap;

			if (last_ibp)
				xfs_trans_brelse(tp, last_ibp);

			imap.im_blkno = 0;
			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);

			error = xfs_imap(mp, tp, next_ino, &imap, 0);
			if (error) {
				xfs_warn(mp,
					"%s: xfs_imap returned error %d.",
					__func__, error);
				return error;
			}

			error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
					       &last_ibp, 0, 0);
			if (error) {
				xfs_warn(mp,
					"%s: xfs_imap_to_bp returned error %d.",
					__func__, error);
				return error;
			}

			last_offset = imap.im_boffset;
			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
			ASSERT(next_agino != NULLAGINO);
			ASSERT(next_agino != 0);
		}

		/*
		 * Now last_ibp points to the buffer previous to us on the
		 * unlinked list. Pull us from the list.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the previous inode on the list to the next inode.
		 */
		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
		ASSERT(next_agino != 0);
		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, last_dip);

		xfs_trans_inode_buf(tp, last_ibp);
		xfs_trans_log_buf(tp, last_ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, last_ibp);
	}
	return 0;
}
/*
 * A big issue when freeing the inode cluster is that we _cannot_ skip any
 * inodes that are in memory - they all must be marked stale and attached to
 * the cluster buffer.
 */
STATIC int
xfs_ifree_cluster(
	xfs_inode_t		*free_ip,
	xfs_trans_t		*tp,
	struct xfs_icluster	*xic)
{
	xfs_mount_t		*mp = free_ip->i_mount;
	int			blks_per_cluster;
	int			inodes_per_cluster;
	int			nbufs;
	int			i, j;
	int			ioffset;
	xfs_daddr_t		blkno;
	xfs_buf_t		*bp;
	xfs_inode_t		*ip;
	xfs_inode_log_item_t	*iip;
	xfs_log_item_t		*lip;
	struct xfs_perag	*pag;
	xfs_ino_t		inum;

	inum = xic->first_ino;
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
	nbufs = mp->m_ialloc_blks / blks_per_cluster;

	for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
		/*
		 * The allocation bitmap tells us which inodes of the chunk were
		 * physically allocated. Skip the cluster if an inode falls into
		 * a sparse region.
		 */
		ioffset = inum - xic->first_ino;
		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
			ASSERT(do_mod(ioffset, inodes_per_cluster) == 0);
			continue;
		}

		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
					 XFS_INO_TO_AGBNO(mp, inum));

		/*
		 * We obtain and lock the backing buffer first in the process
		 * here, as we have to ensure that any dirty inode that we
		 * can't get the flush lock on is attached to the buffer.
		 * If we scan the in-memory inodes first, then buffer IO can
		 * complete before we get a lock on it, and hence we may fail
		 * to mark all the active inodes on the buffer stale.
		 */
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
				       mp->m_bsize * blks_per_cluster,
				       XBF_UNMAPPED);

		if (!bp)
			return -ENOMEM;

		/*
		 * This buffer may not have been correctly initialised as we
		 * didn't read it from disk. That's not important because we
		 * are only using it to mark the buffer as stale in the log,
		 * and to attach stale cached inodes to it. That means it will
		 * never be dispatched for IO. If it is, we want to know about
		 * it, and we want it to fail. We can achieve this by adding a
		 * write verifier to the buffer.
		 */
		bp->b_ops = &xfs_inode_buf_ops;

		/*
		 * Walk the inodes already attached to the buffer and mark them
		 * stale. These will all have the flush locks held, so an
		 * in-memory inode walk can't lock them. By marking them all
		 * stale first, we will not attempt to lock them in the loop
		 * below as the XFS_ISTALE flag will be set.
		 */
		lip = bp->b_fspriv;
		while (lip) {
			if (lip->li_type == XFS_LI_INODE) {
				iip = (xfs_inode_log_item_t *)lip;
				ASSERT(iip->ili_logged == 1);
				lip->li_cb = xfs_istale_done;
				xfs_trans_ail_copy_lsn(mp->m_ail,
							&iip->ili_flush_lsn,
							&iip->ili_item.li_lsn);
				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
			}
			lip = lip->li_bio_list;
		}

		/*
		 * For each inode in memory attempt to add it to the inode
		 * buffer and set it up for being staled on buffer IO
		 * completion. This is safe as we've locked out tail pushing
		 * and flushing by locking the buffer.
		 *
		 * We have already marked every inode that was part of a
		 * transaction stale above, which means there is no point in
		 * even trying to lock them.
		 */
		for (i = 0; i < inodes_per_cluster; i++) {
retry:
			rcu_read_lock();
			ip = radix_tree_lookup(&pag->pag_ici_root,
					XFS_INO_TO_AGINO(mp, (inum + i)));

			/* Inode not in memory, nothing to do */
			if (!ip) {
				rcu_read_unlock();
				continue;
			}

			/*
			 * Because this is an RCU protected lookup, we could
			 * find a recently freed or even reallocated inode
			 * during the lookup. We need to check under the
			 * i_flags_lock for a valid inode here. Skip it if it
			 * is not valid, the wrong inode or stale.
			 */
			spin_lock(&ip->i_flags_lock);
			if (ip->i_ino != inum + i ||
			    __xfs_iflags_test(ip, XFS_ISTALE)) {
				spin_unlock(&ip->i_flags_lock);
				rcu_read_unlock();
				continue;
			}
			spin_unlock(&ip->i_flags_lock);

			/*
			 * Don't try to lock/unlock the current inode, but we
			 * _cannot_ skip the other inodes that we did not find
			 * in the list attached to the buffer and are not
			 * already marked stale. If we can't lock it, back off
			 * and retry.
			 */
			if (ip != free_ip &&
			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
				rcu_read_unlock();
				delay(1);
				goto retry;
			}
			rcu_read_unlock();

			xfs_iflock(ip);
			xfs_iflags_set(ip, XFS_ISTALE);

			/*
			 * We don't need to attach clean inodes or those only
			 * with unlogged changes (which we throw away anyway).
			 */
			iip = ip->i_itemp;
			if (!iip || xfs_inode_clean(ip)) {
				ASSERT(ip != free_ip);
				xfs_ifunlock(ip);
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				continue;
			}

			iip->ili_last_fields = iip->ili_fields;
			iip->ili_fields = 0;
			iip->ili_fsync_fields = 0;
			iip->ili_logged = 1;
			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
						&iip->ili_item.li_lsn);

			xfs_buf_attach_iodone(bp, xfs_istale_done,
						  &iip->ili_item);

			if (ip != free_ip)
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		xfs_trans_stale_inode_buf(tp, bp);
		xfs_trans_binval(tp, bp);
	}

	xfs_perag_put(pag);
	return 0;
}
/*
 * This is called to return an inode to the inode free list.
 * The inode should already be truncated to 0 length and have
 * no pages associated with it. This routine also assumes that
 * the inode is already a part of the transaction.
 *
 * The on-disk copy of the inode will have been added to the list
 * of unlinked inodes in the AGI. We need to remove the inode from
 * that list atomically with respect to freeing it here.
 */
int
xfs_ifree(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_bmap_free_t	*flist)
{
	int			error;
	struct xfs_icluster	xic = { 0 };

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_nextents == 0);
	ASSERT(ip->i_d.di_anextents == 0);
	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
	ASSERT(ip->i_d.di_nblocks == 0);

	/*
	 * Pull the on-disk inode from the AGI unlinked list.
	 */
	error = xfs_iunlink_remove(tp, ip);
	if (error)
		return error;

	error = xfs_difree(tp, ip->i_ino, flist, &xic);
	if (error)
		return error;

	ip->i_d.di_mode = 0;		/* mark incore inode as free */
	ip->i_d.di_flags = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;

	/*
	 * Bump the generation count so no one will be confused
	 * by reincarnations of this inode.
	 */
	ip->i_d.di_gen++;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (xic.deleted)
		error = xfs_ifree_cluster(ip, tp, &xic);

	return error;
}
/*
 * This is called to unpin an inode. The caller must have the inode locked
 * in at least shared mode so that the buffer cannot be subsequently pinned
 * once someone is waiting for it to be unpinned.
 */
static void
xfs_iunpin(
	struct xfs_inode	*ip)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));

	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);

	/* Give the log a push to start the unpinning I/O */
	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
}

static void
__xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);

	xfs_iunpin(ip);

	do {
		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_ipincount(ip))
			io_schedule();
	} while (xfs_ipincount(ip));
	finish_wait(wq, &wait.wait);
}

void
xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	if (xfs_ipincount(ip))
		__xfs_iunpin_wait(ip);
}
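/*
 * Note (illustrative): __xfs_iunpin_wait() above is an open-coded variant of
 * the kernel's wait-on-bit pattern. The pin count is the real wait condition;
 * __XFS_IPINNED_BIT merely selects a shared hashed waitqueue via
 * bit_waitqueue(). Because that waitqueue is shared, wakeups may be for
 * someone else's bit, so the condition must be re-checked after every wakeup.
 * The generic shape is (with check_condition() standing in for
 * xfs_ipincount(ip) here, purely as a placeholder):
 *
 *	do {
 *		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
 *		if (check_condition())
 *			io_schedule();
 *	} while (check_condition());
 *	finish_wait(wq, &wait.wait);
 */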
/*
 * Removing an inode from the namespace involves removing the directory entry
 * and dropping the link count on the inode. Removing the directory entry can
 * result in locking an AGF (directory blocks were freed) and removing a link
 * count can result in placing the inode on an unlinked list which results in
 * locking an AGI.
 *
 * The big problem here is that we have an ordering constraint on AGF and AGI
 * locking - inode allocation locks the AGI, then can allocate a new extent for
 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
 * removes the inode from the unlinked list, requiring that we lock the AGI
 * first, and then freeing the inode can result in an inode chunk being freed
 * and hence freeing disk space requiring that we lock an AGF.
 *
 * Hence the ordering that is imposed by other parts of the code is AGI before
 * AGF. This means we cannot remove the directory entry before we drop the
 * inode reference count and put it on the unlinked list as this results in a
 * lock order of AGF then AGI, and this can deadlock against inode allocation
 * and freeing. Therefore we must drop the link counts before we remove the
 * directory entry.
 *
 * This is still safe from a transactional point of view - it is not until we
 * get to xfs_bmap_finish() that we have the possibility of multiple
 * transactions in this operation. Hence as long as we remove the directory
 * entry and drop the link count in the first transaction of the remove
 * operation, there are no transactional constraints on the ordering here.
 */
int
xfs_remove(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		*ip)
{
	xfs_mount_t		*mp = dp->i_mount;
	xfs_trans_t		*tp = NULL;
	int			is_dir = S_ISDIR(ip->i_d.di_mode);
	int			error = 0;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	uint			resblks;

	trace_xfs_remove(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(dp, 0);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		goto std_return;

	if (is_dir)
		tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
	else
		tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);

	/*
	 * We try to get the real space reservation first,
	 * allowing for directory btree deletion(s) implying
	 * possible bmap insert(s). If we can't get the space
	 * reservation then we use 0 instead, and avoid the bmap
	 * btree insert(s) in the directory code by, if the bmap
	 * insert tries to happen, instead trimming the LAST
	 * block from the directory.
	 */
	resblks = XFS_REMOVE_SPACE_RES(mp);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, resblks, 0);
	if (error == -ENOSPC) {
		resblks = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, 0, 0);
	}
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}

	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	/*
	 * If we're removing a directory perform some additional validation.
	 */
	if (is_dir) {
		ASSERT(ip->i_d.di_nlink >= 2);
		if (ip->i_d.di_nlink != 2) {
			error = -ENOTEMPTY;
			goto out_trans_cancel;
		}
		if (!xfs_dir_isempty(ip)) {
			error = -ENOTEMPTY;
			goto out_trans_cancel;
		}

		/* Drop the link from ip's "..". */
		error = xfs_droplink(tp, dp);
		if (error)
			goto out_trans_cancel;

		/* Drop the "." link from ip to self. */
		error = xfs_droplink(tp, ip);
		if (error)
			goto out_trans_cancel;
	} else {
		/*
		 * When removing a non-directory we need to log the parent
		 * inode here. For a directory this is done implicitly
		 * by the xfs_droplink call for the ".." entry.
		 */
		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

	/* Drop the link from dp to ip. */
	error = xfs_droplink(tp, ip);
	if (error)
		goto out_trans_cancel;

	xfs_bmap_init(&free_list, &first_block);
	error = xfs_dir_removename(tp, dp, name, ip->i_ino,
				   &first_block, &free_list, resblks);
	if (error) {
		ASSERT(error != -ENOENT);
		goto out_bmap_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * remove transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	error = xfs_bmap_finish(&tp, &free_list, NULL);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto std_return;

	if (is_dir && xfs_inode_is_filestream(ip))
		xfs_filestream_deassociate(ip);

	return 0;

 out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
 out_trans_cancel:
	xfs_trans_cancel(tp);
 std_return:
	return error;
}
/*
 * Enter all inodes for a rename transaction into a sorted array.
 */
#define __XFS_SORT_INODES	5
STATIC void
xfs_sort_for_rename(
	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
	struct xfs_inode	*ip1,	/* in: inode of old entry */
	struct xfs_inode	*ip2,	/* in: inode of new entry */
	struct xfs_inode	*wip,	/* in: whiteout inode */
	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
	int			*num_inodes)  /* in/out: inodes in array */
{
	int			i, j;

	ASSERT(*num_inodes == __XFS_SORT_INODES);
	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));

	/*
	 * i_tab contains a list of pointers to inodes. We initialize
	 * the table here & we'll sort it. We will then use it to
	 * order the acquisition of the inode locks.
	 *
	 * Note that the table may contain duplicates. e.g., dp1 == dp2.
	 */
	i = 0;
	i_tab[i++] = dp1;
	i_tab[i++] = dp2;
	i_tab[i++] = ip1;
	if (ip2)
		i_tab[i++] = ip2;
	if (wip)
		i_tab[i++] = wip;
	*num_inodes = i;

	/*
	 * Sort the elements via bubble sort. (Remember, there are at
	 * most 5 elements to sort, so this is adequate.)
	 */
	for (i = 0; i < *num_inodes; i++) {
		for (j = 1; j < *num_inodes; j++) {
			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
				struct xfs_inode *temp = i_tab[j];
				i_tab[j] = i_tab[j-1];
				i_tab[j-1] = temp;
			}
		}
	}
}
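/*
 * Example (illustrative only): with dp1->i_ino == 131, dp2->i_ino == 17 and
 * ip1->i_ino == 64 (no ip2, no wip), the sort above yields
 *
 *	i_tab = { dp2, ip1, dp1 }, *num_inodes = 3
 *
 * so that xfs_lock_inodes() in xfs_rename() always takes the ILOCKs in
 * ascending inode number order. Two concurrent renames touching the same
 * inodes therefore acquire the locks in the same order, which is what
 * prevents ABBA deadlocks between them.
 */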
static int
xfs_finish_rename(
	struct xfs_trans	*tp,
	struct xfs_bmap_free	*free_list)
{
	int			error;

	/*
	 * If this is a synchronous mount, make sure that the rename transaction
	 * goes to disk before returning to the user.
	 */
	if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	error = xfs_bmap_finish(&tp, free_list, NULL);
	if (error) {
		xfs_bmap_cancel(free_list);
		xfs_trans_cancel(tp);
		return error;
	}

	return xfs_trans_commit(tp);
}
/*
 * xfs_cross_rename()
 *
 * Responsible for handling the RENAME_EXCHANGE flag in the renameat2()
 * system call.
 */
STATIC int
xfs_cross_rename(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp1,
	struct xfs_name		*name1,
	struct xfs_inode	*ip1,
	struct xfs_inode	*dp2,
	struct xfs_name		*name2,
	struct xfs_inode	*ip2,
	struct xfs_bmap_free	*free_list,
	xfs_fsblock_t		*first_block,
	int			spaceres)
{
	int		error = 0;
	int		ip1_flags = 0;
	int		ip2_flags = 0;
	int		dp2_flags = 0;

	/* Swap inode number for dirent in first parent */
	error = xfs_dir_replace(tp, dp1, name1,
				ip2->i_ino,
				first_block, free_list, spaceres);
	if (error)
		goto out_trans_abort;

	/* Swap inode number for dirent in second parent */
	error = xfs_dir_replace(tp, dp2, name2,
				ip1->i_ino,
				first_block, free_list, spaceres);
	if (error)
		goto out_trans_abort;

	/*
	 * If we're renaming one or more directories across different parents,
	 * update the respective ".." entries (and link counts) to match the
	 * new parents.
	 */
	if (dp1 != dp2) {
		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;

		if (S_ISDIR(ip2->i_d.di_mode)) {
			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
						dp1->i_ino, first_block,
						free_list, spaceres);
			if (error)
				goto out_trans_abort;

			/* transfer ip2 ".." reference to dp1 */
			if (!S_ISDIR(ip1->i_d.di_mode)) {
				error = xfs_droplink(tp, dp2);
				if (error)
					goto out_trans_abort;
				error = xfs_bumplink(tp, dp1);
				if (error)
					goto out_trans_abort;
			}

			/*
			 * Although ip1 isn't changed here, userspace needs
			 * to be warned about the change, so that applications
			 * relying on it (like backup tools) are properly
			 * notified of the change.
			 */
			ip1_flags |= XFS_ICHGTIME_CHG;
			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
		}

		if (S_ISDIR(ip1->i_d.di_mode)) {
			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
						dp2->i_ino, first_block,
						free_list, spaceres);
			if (error)
				goto out_trans_abort;

			/* transfer ip1 ".." reference to dp2 */
			if (!S_ISDIR(ip2->i_d.di_mode)) {
				error = xfs_droplink(tp, dp1);
				if (error)
					goto out_trans_abort;
				error = xfs_bumplink(tp, dp2);
				if (error)
					goto out_trans_abort;
			}

			/*
			 * Although ip2 isn't changed here, userspace needs
			 * to be warned about the change, so that applications
			 * relying on it (like backup tools) are properly
			 * notified of the change.
			 */
			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
			ip2_flags |= XFS_ICHGTIME_CHG;
		}
	}

	if (ip1_flags) {
		xfs_trans_ichgtime(tp, ip1, ip1_flags);
		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
	}
	if (ip2_flags) {
		xfs_trans_ichgtime(tp, ip2, ip2_flags);
		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
	}
	if (dp2_flags) {
		xfs_trans_ichgtime(tp, dp2, dp2_flags);
		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
	}
	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
	return xfs_finish_rename(tp, free_list);

out_trans_abort:
	xfs_bmap_cancel(free_list);
	xfs_trans_cancel(tp);
	return error;
}
/*
 * xfs_rename_alloc_whiteout()
 *
 * Return a referenced, unlinked, unlocked inode that can be used as a
 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
 * crash between allocating the inode and linking it into the rename
 * transaction, recovery will free the inode and we won't leak it.
 */
static int
xfs_rename_alloc_whiteout(
	struct xfs_inode	*dp,
	struct xfs_inode	**wip)
{
	struct xfs_inode	*tmpfile;
	int			error;

	error = xfs_create_tmpfile(dp, NULL, S_IFCHR | WHITEOUT_MODE, &tmpfile);
	if (error)
		return error;

	/*
	 * Prepare the tmpfile inode as if it were created through the VFS.
	 * Otherwise, the link increment paths will complain about nlink 0->1.
	 * Drop the link count as done by d_tmpfile(), complete the inode setup
	 * and flag it as linkable.
	 */
	drop_nlink(VFS_I(tmpfile));
	xfs_finish_inode_setup(tmpfile);
	VFS_I(tmpfile)->i_state |= I_LINKABLE;

	*wip = tmpfile;
	return 0;
}
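/*
 * Note (illustrative): for RENAME_WHITEOUT, xfs_rename() below plugs the
 * inode allocated here into the source dirent instead of removing that
 * dirent, and only then gives the whiteout a real link count, roughly:
 *
 *	error = xfs_rename_alloc_whiteout(target_dp, &wip);
 *	src_name->type = XFS_DIR3_FT_CHRDEV;
 *	...
 *	error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino, ...);
 *	error = xfs_bumplink(tp, wip);
 *	error = xfs_iunlink_remove(tp, wip);
 *
 * Keeping the inode on the unlinked list until the final step means any
 * failure before then is cleaned up simply by dropping the last reference.
 */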
/*
 * xfs_rename
 */
int
xfs_rename(
	struct xfs_inode	*src_dp,
	struct xfs_name		*src_name,
	struct xfs_inode	*src_ip,
	struct xfs_inode	*target_dp,
	struct xfs_name		*target_name,
	struct xfs_inode	*target_ip,
	unsigned int		flags)
{
	struct xfs_mount	*mp = src_dp->i_mount;
	struct xfs_trans	*tp;
	struct xfs_bmap_free	free_list;
	xfs_fsblock_t		first_block;
	struct xfs_inode	*wip = NULL;		/* whiteout inode */
	struct xfs_inode	*inodes[__XFS_SORT_INODES];
	int			num_inodes = __XFS_SORT_INODES;
	bool			new_parent = (src_dp != target_dp);
	bool			src_is_directory = S_ISDIR(src_ip->i_d.di_mode);
	int			spaceres;
	int			error;

	trace_xfs_rename(src_dp, target_dp, src_name, target_name);

	if ((flags & RENAME_EXCHANGE) && !target_ip)
		return -EINVAL;

	/*
	 * If we are doing a whiteout operation, allocate the whiteout inode
	 * we will be placing at the target and ensure the type is set
	 * appropriately.
	 */
	if (flags & RENAME_WHITEOUT) {
		ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
		error = xfs_rename_alloc_whiteout(target_dp, &wip);
		if (error)
			return error;

		/* setup target dirent info as whiteout */
		src_name->type = XFS_DIR3_FT_CHRDEV;
	}

	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
			    inodes, &num_inodes);

	tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0);
	if (error == -ENOSPC) {
		spaceres = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0);
	}
	if (error)
		goto out_trans_cancel;

	/*
	 * Attach the dquots to the inodes
	 */
	error = xfs_qm_vop_rename_dqattach(inodes);
	if (error)
		goto out_trans_cancel;

	/*
	 * Lock all the participating inodes. Depending upon whether
	 * the target_name exists in the target directory, and
	 * whether the target directory is the same as the source
	 * directory, we can lock from 2 to 4 inodes.
	 */
	if (!new_parent)
		xfs_ilock(src_dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
	else
		xfs_lock_two_inodes(src_dp, target_dp,
				    XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);

	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);

	/*
	 * Join all the inodes to the transaction. From this point on,
	 * we can rely on either trans_commit or trans_cancel to unlock
	 * them.
	 */
	xfs_trans_ijoin(tp, src_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	if (new_parent)
		xfs_trans_ijoin(tp, target_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
	if (target_ip)
		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
	if (wip)
		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);

	/*
	 * If we are using project inheritance, we only allow renames
	 * into our tree when the project IDs are the same; else the
	 * tree quota mechanism would be circumvented.
	 */
	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
		error = -EXDEV;
		goto out_trans_cancel;
	}

	xfs_bmap_init(&free_list, &first_block);
  2641. /* RENAME_EXCHANGE is unique from here on. */
  2642. if (flags & RENAME_EXCHANGE)
  2643. return xfs_cross_rename(tp, src_dp, src_name, src_ip,
  2644. target_dp, target_name, target_ip,
  2645. &free_list, &first_block, spaceres);

	/*
	 * Set up the target.
	 */
	if (target_ip == NULL) {
		/*
		 * If there's no space reservation, check the entry will
		 * fit before actually inserting it.
		 */
		if (!spaceres) {
			error = xfs_dir_canenter(tp, target_dp, target_name);
			if (error)
				goto out_trans_cancel;
		}
		/*
		 * If target does not exist and the rename crosses
		 * directories, adjust the target directory link count
		 * to account for the ".." reference from the new entry.
		 */
		error = xfs_dir_createname(tp, target_dp, target_name,
					   src_ip->i_ino, &first_block,
					   &free_list, spaceres);
		if (error)
			goto out_bmap_cancel;

		xfs_trans_ichgtime(tp, target_dp,
					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		if (new_parent && src_is_directory) {
			error = xfs_bumplink(tp, target_dp);
			if (error)
				goto out_bmap_cancel;
		}
	} else { /* target_ip != NULL */
		/*
		 * If target exists and it's a directory, check that both
		 * target and source are directories and that target can be
		 * destroyed, or that neither is a directory.
		 */
		if (S_ISDIR(target_ip->i_d.di_mode)) {
			/*
			 * Make sure target dir is empty.
			 */
			if (!(xfs_dir_isempty(target_ip)) ||
			    (target_ip->i_d.di_nlink > 2)) {
				error = -EEXIST;
				goto out_trans_cancel;
			}
		}

		/*
		 * Link the source inode under the target name.
		 * If the source inode is a directory and we are moving
		 * it across directories, its ".." entry will be
		 * inconsistent until we replace that down below.
		 *
		 * In case there is already an entry with the same
		 * name at the destination directory, remove it first.
		 */
		error = xfs_dir_replace(tp, target_dp, target_name,
					src_ip->i_ino,
					&first_block, &free_list, spaceres);
		if (error)
			goto out_bmap_cancel;

		xfs_trans_ichgtime(tp, target_dp,
					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		/*
		 * Decrement the link count on the target since the target
		 * dir no longer points to it.
		 */
		error = xfs_droplink(tp, target_ip);
		if (error)
			goto out_bmap_cancel;

		if (src_is_directory) {
			/*
			 * Drop the link from the old "." entry.
			 */
			error = xfs_droplink(tp, target_ip);
			if (error)
				goto out_bmap_cancel;
		}
	} /* target_ip != NULL */

	/*
	 * Remove the source.
	 */
	if (new_parent && src_is_directory) {
		/*
		 * Rewrite the ".." entry to point to the new
		 * directory.
		 */
		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
					target_dp->i_ino,
					&first_block, &free_list, spaceres);
		ASSERT(error != -EEXIST);
		if (error)
			goto out_bmap_cancel;
	}

	/*
	 * We always want to hit the ctime on the source inode.
	 *
	 * This isn't strictly required by the standards since the source
	 * inode isn't really being changed, but old unix file systems did
	 * it and some incremental backup programs won't work without it.
	 */
	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);

	/*
	 * Adjust the link count on src_dp.  This is necessary when
	 * renaming a directory, either within one parent when
	 * the target existed, or across two parent directories.
	 */
	if (src_is_directory && (new_parent || target_ip != NULL)) {
		/*
		 * Decrement link count on src_directory since the
		 * entry that's moved no longer points to it.
		 */
		error = xfs_droplink(tp, src_dp);
		if (error)
			goto out_bmap_cancel;
	}

	/*
	 * For whiteouts, we only need to update the source dirent with the
	 * inode number of the whiteout inode rather than removing it
	 * altogether.
	 */
	if (wip) {
		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
					&first_block, &free_list, spaceres);
	} else
		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
					   &first_block, &free_list, spaceres);
	if (error)
		goto out_bmap_cancel;

	/*
	 * For whiteouts, we need to bump the link count on the whiteout inode.
	 * This means that failures all the way up to this point leave the inode
	 * on the unlinked list and so cleanup is a simple matter of dropping
	 * the remaining reference to it. If we fail here after bumping the link
	 * count, we're shutting down the filesystem so we'll never see the
	 * intermediate state on disk.
	 */
	if (wip) {
		ASSERT(VFS_I(wip)->i_nlink == 0 && wip->i_d.di_nlink == 0);
		error = xfs_bumplink(tp, wip);
		if (error)
			goto out_bmap_cancel;
		error = xfs_iunlink_remove(tp, wip);
		if (error)
			goto out_bmap_cancel;
		xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);

		/*
		 * Now we have a real link, clear the "I'm a tmpfile" state
		 * flag from the inode so it doesn't accidentally get misused
		 * in future.
		 */
		VFS_I(wip)->i_state &= ~I_LINKABLE;
	}

	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
	if (new_parent)
		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);

	error = xfs_finish_rename(tp, &free_list);
	if (wip)
		IRELE(wip);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
out_trans_cancel:
	xfs_trans_cancel(tp);
	if (wip)
		IRELE(wip);
	return error;
}

STATIC int
xfs_iflush_cluster(
	xfs_inode_t	*ip,
	xfs_buf_t	*bp)
{
	xfs_mount_t		*mp = ip->i_mount;
	struct xfs_perag	*pag;
	unsigned long		first_index, mask;
	unsigned long		inodes_per_cluster;
	int			ilist_size;
	xfs_inode_t		**ilist;
	xfs_inode_t		*iq;
	int			nr_found;
	int			clcount = 0;
	int			bufwasdelwri;
	int			i;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));

	inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
	if (!ilist)
		goto out_put;

	mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
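
	/*
	 * Worked example (illustrative numbers, not from the original
	 * comments): with 8k inode clusters and 256 byte inodes
	 * (sb_inodelog == 8), inodes_per_cluster is 8192 >> 8 == 32 and
	 * mask is ~(32 - 1), so first_index is the AG inode number of our
	 * inode rounded down to the start of its cluster.
	 */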

	rcu_read_lock();
	/* really need a gang lookup range call here */
	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void **)ilist,
					first_index, inodes_per_cluster);
	if (nr_found == 0)
		goto out_free;

	for (i = 0; i < nr_found; i++) {
		iq = ilist[i];
		if (iq == ip)
			continue;

		/*
		 * Because this is an RCU protected lookup, we could find a
		 * recently freed or even reallocated inode during the lookup.
		 * We need to check under the i_flags_lock for a valid inode
		 * here.  Skip it if it is not valid or the wrong inode.
		 * Note that the checks must be made against iq, the inode we
		 * just looked up, not against ip, the inode we were called
		 * with.
		 */
		spin_lock(&iq->i_flags_lock);
		if (!iq->i_ino ||
		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
			spin_unlock(&iq->i_flags_lock);
			continue;
		}
		spin_unlock(&iq->i_flags_lock);

		/*
		 * Do an un-protected check to see if the inode is dirty and
		 * is a candidate for flushing.  These checks will be repeated
		 * later after the appropriate locks are acquired.
		 */
		if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
			continue;

		/*
		 * Try to get locks.  If any are unavailable or it is pinned,
		 * then this inode cannot be flushed and is skipped.
		 */
		if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
			continue;
		if (!xfs_iflock_nowait(iq)) {
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}
		if (xfs_ipincount(iq)) {
			xfs_ifunlock(iq);
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * Arriving here means that this inode can be flushed.  First
		 * re-check that it's dirty before flushing.
		 */
		if (!xfs_inode_clean(iq)) {
			int	error;

			error = xfs_iflush_int(iq, bp);
			if (error) {
				xfs_iunlock(iq, XFS_ILOCK_SHARED);
				goto cluster_corrupt_out;
			}
			clcount++;
		} else {
			xfs_ifunlock(iq);
		}
		xfs_iunlock(iq, XFS_ILOCK_SHARED);
	}

	if (clcount) {
		XFS_STATS_INC(mp, xs_icluster_flushcnt);
		XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
	}

out_free:
	rcu_read_unlock();
	kmem_free(ilist);
out_put:
	xfs_perag_put(pag);
	return 0;

cluster_corrupt_out:
	/*
	 * Corruption detected in the clustering loop.  Invalidate the
	 * inode buffer and shut down the filesystem.
	 */
	rcu_read_unlock();
	/*
	 * Clean up the buffer.  If it was delwri, just release it --
	 * brelse can handle it with no problems.  If not, shut down the
	 * filesystem before releasing the buffer.
	 */
	bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
	if (bufwasdelwri)
		xfs_buf_relse(bp);

	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

	if (!bufwasdelwri) {
		/*
		 * Just like incore_relse: if we have b_iodone functions,
		 * mark the buffer as an error and call them.  Otherwise
		 * mark it as stale and brelse.
		 */
		if (bp->b_iodone) {
			XFS_BUF_UNDONE(bp);
			xfs_buf_stale(bp);
			xfs_buf_ioerror(bp, -EIO);
			xfs_buf_ioend(bp);
		} else {
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
		}
	}

	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(iq, false);
	kmem_free(ilist);
	xfs_perag_put(pag);
	return -EFSCORRUPTED;
}

/*
 * Flush dirty inode metadata into the backing buffer.
 *
 * The caller must have the inode lock and the inode flush lock held.  The
 * inode lock will still be held upon return to the caller, and the inode
 * flush lock will be released after the inode has reached the disk.
 *
 * The caller must write out the buffer returned in *bpp and release it.
 */
int
xfs_iflush(
	struct xfs_inode	*ip,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buf		*bp;
	struct xfs_dinode	*dip;
	int			error;

	XFS_STATS_INC(mp, xs_iflush_count);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));

	*bpp = NULL;

	xfs_iunpin_wait(ip);

	/*
	 * For stale inodes we cannot rely on the backing buffer remaining
	 * stale in cache for the remaining life of the stale inode and so
	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
	 * inodes below.  We have to check this after ensuring the inode is
	 * unpinned so that it is safe to reclaim the stale inode after the
	 * flush call.
	 */
	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_ifunlock(ip);
		return 0;
	}

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly.  If that's the case we must not write this inode
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto abort_out;
	}

	/*
	 * Get the buffer containing the on-disk inode.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
			       0);
	if (error || !bp) {
		xfs_ifunlock(ip);
		return error;
	}

	/*
	 * First flush out the inode that xfs_iflush was called with.
	 */
	error = xfs_iflush_int(ip, bp);
	if (error)
		goto corrupt_out;

	/*
	 * If the buffer is pinned then push on the log now so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);

	/*
	 * inode clustering:
	 * see if other inodes can be gathered into this write
	 */
	error = xfs_iflush_cluster(ip, bp);
	if (error)
		goto cluster_corrupt_out;

	*bpp = bp;
	return 0;

corrupt_out:
	xfs_buf_relse(bp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
	error = -EFSCORRUPTED;
abort_out:
	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(ip, false);
	return error;
}
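
/*
 * Minimal caller sketch (illustrative, not part of this file): per the
 * comment above xfs_iflush(), a writeback caller is expected to do roughly
 *
 *	error = xfs_iflush(ip, &bp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, buffer_list);
 *		xfs_buf_relse(bp);
 *	}
 *
 * i.e. queue or write the returned buffer and then release the reference
 * to it.
 */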

STATIC int
xfs_iflush_int(
	struct xfs_inode	*ip,
	struct xfs_buf		*bp)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;
	struct xfs_dinode	*dip;
	struct xfs_mount	*mp = ip->i_mount;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
	ASSERT(iip != NULL && iip->ili_fields != 0);
	ASSERT(ip->i_d.di_version > 1);

	/* set *dip = inode's place in the buffer */
	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);

	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
			   mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
			   mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
			__func__, ip->i_ino, ip, ip->i_d.di_magic);
		goto corrupt_out;
	}
	if (S_ISREG(ip->i_d.di_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad regular inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	} else if (S_ISDIR(ip->i_d.di_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad directory inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	}
	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
			   ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
			   XFS_RANDOM_IFLUSH_5)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: detected corrupt incore inode %Lu, "
			"total extents = %d, nblocks = %Ld, ptr 0x%p",
			__func__, ip->i_ino,
			ip->i_d.di_nextents + ip->i_d.di_anextents,
			ip->i_d.di_nblocks, ip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
			   mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
		goto corrupt_out;
	}

	/*
	 * Inode item log recovery for v2 inodes is dependent on the
	 * di_flushiter count for correct sequencing.  We bump the flush
	 * iteration count so we can detect flushes which postdate a log record
	 * during recovery.  This is redundant as we now log every change and
	 * hence this can't happen but we still need to do it to ensure
	 * backwards compatibility with old kernels that predate logging all
	 * inode changes.
	 */
	if (ip->i_d.di_version < 3)
		ip->i_d.di_flushiter++;

	/*
	 * Copy the dirty parts of the inode into the on-disk inode.  We always
	 * copy out the core of the inode, because if the inode is dirty at all
	 * the core must be.
	 */
	xfs_dinode_to_disk(dip, &ip->i_d);

	/* Wrap, we never let the log put out DI_MAX_FLUSH */
	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
		ip->i_d.di_flushiter = 0;
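
	/*
	 * Illustrative note (an assumption, not from the original comments):
	 * DI_MAX_FLUSH is 0xffff, so the on-disk copy written just above may
	 * carry 0xffff while the incore counter restarts at 0; recovery can
	 * then use that reserved value to recognize the wrap when sequencing
	 * flushes against log records.
	 */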
	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
	if (XFS_IFORK_Q(ip))
		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
	xfs_inobp_check(mp, bp);

	/*
	 * We've recorded everything logged in the inode, so we'd like to clear
	 * the ili_fields bits so we don't log and flush things unnecessarily.
	 * However, we can't stop logging all this information until the data
	 * we've copied into the disk buffer is written to disk.  If we did we
	 * might overwrite the copy of the inode in the log with all the data
	 * after re-logging only part of it, and in the face of a crash we
	 * wouldn't have all the data we need to recover.
	 *
	 * What we do is move the bits to the ili_last_fields field.  When
	 * logging the inode, these bits are moved back to the ili_fields field.
	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
	 * know that the information those bits represent is permanently on
	 * disk.  As long as the flush completes before the inode is logged
	 * again, then both ili_fields and ili_last_fields will be cleared.
	 *
	 * We can play with the ili_fields bits here, because the inode lock
	 * must be held exclusively in order to set bits there and the flush
	 * lock protects the ili_last_fields bits.  Set ili_logged so the flush
	 * done routine can tell whether or not to look in the AIL.  Also, store
	 * the current LSN of the inode so that we can tell whether the item has
	 * moved in the AIL from xfs_iflush_done().  In order to read the lsn we
	 * need the AIL lock, because it is a 64 bit value that cannot be read
	 * atomically.
	 */
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	iip->ili_fsync_fields = 0;
	iip->ili_logged = 1;

	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
				&iip->ili_item.li_lsn);
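
	/*
	 * Illustrative timeline of the scheme described above (an addition,
	 * for clarity): the flush moves bits from ili_fields into
	 * ili_last_fields; if the inode is relogged before the buffer I/O
	 * completes, those bits move back into ili_fields; xfs_iflush_done()
	 * then clears only ili_last_fields, so the relogged changes keep
	 * being logged until a later flush writes them out as well.
	 */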

	/*
	 * Attach the function xfs_iflush_done to the inode's
	 * buffer.  This will remove the inode from the AIL
	 * and unlock the inode's flush lock when the inode is
	 * completely written to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);

	/* update the lsn in the on disk inode if required */
	if (ip->i_d.di_version == 3)
		dip->di_lsn = cpu_to_be64(iip->ili_item.li_lsn);

	/* generate the checksum. */
	xfs_dinode_calc_crc(mp, dip);

	ASSERT(bp->b_fspriv != NULL);
	ASSERT(bp->b_iodone != NULL);
	return 0;

corrupt_out:
	return -EFSCORRUPTED;
}