xfs_da_btree.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"

/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da3_root_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_root,
			      xfs_da_state_blk_t *new_child);
STATIC int xfs_da3_node_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_blk,
			      xfs_da_state_blk_t *split_blk,
			      xfs_da_state_blk_t *blk_to_add,
			      int treelevel,
			      int *result);
STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *node_blk_1,
				   xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da3_node_add(xfs_da_state_t *state,
			     xfs_da_state_blk_t *old_node_blk,
			     xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da3_root_join(xfs_da_state_t *state,
			     xfs_da_state_blk_t *root_blk);
STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
				xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *src_node_blk,
				   xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
			      xfs_da_state_blk_t *drop_blk,
			      xfs_da_state_blk_t *save_blk);

kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}
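
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): callers such as the attr and dir2 code pair xfs_da_state_alloc()
 * with xfs_da_state_free() around a tree operation.  The exact field
 * setup below is an assumed, minimal example.
 *
 *	struct xfs_da_state	*state;
 *	int			retval;
 *
 *	state = xfs_da_state_alloc();
 *	state->args = args;			// a filled-in xfs_da_args
 *	state->mp = args->dp->i_mount;
 *	error = xfs_da3_node_lookup_int(state, &retval);
 *	...
 *	xfs_da_state_free(state);
 */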

static bool
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	const struct xfs_dir_ops *ops;

	ops = xfs_dir_get_ops(mp, NULL);

	ops->node_hdr_from_disk(&ichdr, hdr);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
			return false;

		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
			return false;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return false;
		if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
			return false;
	} else {
		if (ichdr.magic != XFS_DA_NODE_MAGIC)
			return false;
	}
	if (ichdr.level == 0)
		return false;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return false;
	if (ichdr.count == 0)
		return false;

	/*
	 * we don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds
	 */
	if (ichdr.count > mp->m_dir_geo->node_ents &&
	    ichdr.count > mp->m_attr_geo->node_ents)
		return false;

	/* XXX: hash order check? */

	return true;
}

static void
xfs_da3_node_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

	if (!xfs_da3_node_verify(bp)) {
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}

/*
 * leaf/node format detection on trees is sketchy, so a node read can be done
 * on leaf level blocks when detection identifies the tree as a node format
 * tree incorrectly. In this case, we need to swap the verifier to match the
 * correct format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
		case XFS_DA3_NODE_MAGIC:
			if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
				xfs_buf_ioerror(bp, -EFSBADCRC);
				break;
			}
			/* fall through */
		case XFS_DA_NODE_MAGIC:
			if (!xfs_da3_node_verify(bp)) {
				xfs_buf_ioerror(bp, -EFSCORRUPTED);
				break;
			}
			return;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			bp->b_ops = &xfs_attr3_leaf_buf_ops;
			bp->b_ops->verify_read(bp);
			return;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			bp->b_ops = &xfs_dir3_leafn_buf_ops;
			bp->b_ops->verify_read(bp);
			return;
		default:
			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			break;
	}

	/* corrupt block */
	xfs_verifier_error(bp);
}

const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.name = "xfs_da3_node",
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
};

int
xfs_da3_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			which_fork)
{
	int			err;

	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
					which_fork, &xfs_da3_node_buf_ops);
	if (!err && tp) {
		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
		int			type;

		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			type = XFS_BLFT_DA_NODE_BUF;
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			type = XFS_BLFT_ATTR_LEAF_BUF;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			type = XFS_BLFT_DIR_LEAFN_BUF;
			break;
		default:
			type = 0;
			ASSERT(0);
			break;
		}
		xfs_trans_buf_set_type(tp, *bpp, type);
	}
	return err;
}
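
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * reading the root of a data-fork tree attaches xfs_da3_node_buf_ops to the
 * buffer, so later writes run xfs_da3_node_write_verify().  A mappedbno of
 * -1 requests a block mapping lookup; args/bp are assumed context.
 *
 *	struct xfs_buf	*bp;
 *
 *	error = xfs_da3_node_read(args->trans, args->dp, args->geo->leafblk,
 *				  -1, &bp, XFS_DATA_FORK);
 *	if (error)
 *		return error;
 */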

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;
	struct xfs_inode	*dp = args->dp;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
	if (error)
		return error;
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	dp->d_ops->node_hdr_to_disk(node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	*bpp = bp;
	return 0;
}
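
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the split paths below grow the tree by one block and initialize it as an
 * intermediate node; level 1 here is an assumed example value.
 *
 *	error = xfs_da_grow_inode(args, &blkno);
 *	if (error)
 *		return error;
 *	error = xfs_da3_node_create(args, blkno, 1, &bp, args->whichfork);
 */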

/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	struct xfs_buf		*bp;
	int			max;
	int			action = 0;
	int			error;
	int			i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != -ENOSPC)) {
				return error;	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return error;	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk,
						   addblk, max - i, &action);
			addblk->bp = NULL;
			if (error)
				return error;	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return 0;

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error) {
		addblk->bp = NULL;
		return error;	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and just got
	 * bumped because of the addition of a new root node.  There might be
	 * three blocks involved if a double split occurred, and the original
	 * block 0 could be at any position in the list.
	 *
	 * Note: the magic numbers and sibling pointers are in the same
	 * physical place for both v2 and v3 headers (by design). Hence it
	 * doesn't matter which version of the xfs_da_intnode structure we use
	 * here as the result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	addblk->bp = NULL;
	return 0;
}

/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int				/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr icnodehdr;

		dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
		btree = dp->d_ops->node_tree_p(oldroot);
		size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
		level = icnodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		leaf = (xfs_dir2_leaf_t *)oldroot;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
		ents = dp->d_ops->leaf_ents_p(leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}

	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly for this.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	xfs_trans_buf_copy_type(bp, blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= args->geo->leafblk &&
		       blk1->blkno < args->geo->freeblk);
		ASSERT(blk2->blkno >= args->geo->leafblk &&
		       blk2->blkno < args->geo->freeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return 0;
}
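
/*
 * Illustrative note (editor's addition, not part of the original file):
 * per the xfs_da3_node_create() call above, a data-fork (directory) root
 * split recreates the root at args->geo->leafblk and an attr-fork root at
 * block 0, so only the old root's contents move to the freshly grown block
 * while the root's dablk stays fixed.
 */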

/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int				/* error */
xfs_da3_node_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk,
	struct xfs_da_state_blk	*addblk,
	int			treelevel,
	int			*result)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno;
	int			newcount;
	int			error;
	int			useextra;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if (nodehdr.count + newcount > state->args->geo->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return error;	/* GROT: dir is inconsistent */

		error = xfs_da3_node_create(state->args, blkno, treelevel,
					    &newblk->bp,
					    state->args->whichfork);
		if (error)
			return error;	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da3_node_rebalance(state, oldblk, newblk);
		error = xfs_da3_blk_link(state, oldblk, newblk);
		if (error)
			return error;
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da3_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (oldblk->index <= nodehdr.count) {
		oldblk->index++;
		xfs_da3_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da3_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da3_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da3_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return 0;
}
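
/*
 * Worked example (editor's addition, not part of the original file): with
 * an assumed geo->node_ents of 8, a node already holding nodehdr.count == 8
 * entries, and a pending attr-fork double split (useextra == 1), newcount
 * is 2 and 8 + 2 > 8 forces the rebalance/link path above, setting *result
 * to 1 so the caller keeps propagating the split upward.
 */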

/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da3_node_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_intnode	*tmpnode;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da_node_entry *btree_s;
	struct xfs_da_node_entry *btree_d;
	struct xfs_da3_icnode_hdr nodehdr1;
	struct xfs_da3_icnode_hdr nodehdr2;
	struct xfs_trans	*tp;
	int			count;
	int			tmp;
	int			swap = 0;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
	dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
		swap = 1;
	}

	count = (nodehdr1.count - nodehdr2.count) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		tmp = nodehdr2.count;
		if (tmp > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &btree2[0];
			btree_d = &btree2[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		nodehdr2.count += count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree1[nodehdr1.count - count];
		btree_d = &btree2[0];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count -= count;
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[0];
		btree_d = &btree1[nodehdr1.count];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count += count;

		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp  = nodehdr2.count - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[count];
		btree_d = &btree2[0];
		memmove(btree_d, btree_s, tmp);
		nodehdr2.count -= count;
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));

	dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
				dp->d_ops->node_hdr_size +
				(sizeof(btree2[0]) * nodehdr2.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	if (swap) {
		node1 = blk1->bp->b_addr;
		node2 = blk2->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
	}
	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= nodehdr1.count) {
		blk2->index = blk1->index - nodehdr1.count;
		blk1->index = nodehdr1.count + 1;	/* make it invalid */
	}
}
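
/*
 * Worked example (editor's addition, not part of the original file): after
 * a split, blk1 typically holds all entries and blk2 none.  With
 * nodehdr1.count == 10 and nodehdr2.count == 0, count = (10 - 0) / 2 = 5,
 * so the upper five entries of node1 are memcpy'd to the front of node2,
 * leaving five entries in each node.
 */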

/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);

	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->args->geo->leafblk &&
		       newblk->blkno < state->args->geo->freeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	if (oldblk->index < nodehdr.count) {
		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
	}
	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
				tmp + sizeof(*btree)));
	nodehdr.count += 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
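
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * to insert a child entry before slot 2 of a node, a caller sets
 * oldblk->index and fills in newblk before calling; existing entries at
 * index 2 and above shift up by one.  The right-hand values are assumed.
 *
 *	oldblk->index = 2;
 *	newblk->hashval = new_child_lasthash;	// assumed value
 *	newblk->blkno = new_child_blkno;	// assumed value
 *	xfs_da3_node_add(state, oldblk, newblk);
 */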

/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da3_join(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*drop_blk;
	struct xfs_da_state_blk	*save_blk;
	int			action = 0;
	int			error;

	trace_xfs_da_join(state->args);

	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da3_node_remove(state, drop_blk);
			xfs_da3_fixhashpath(state, &state->path);
			error = xfs_da3_node_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_da3_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da3_fixhashpath(state, &state->altpath);
		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return error;
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
					    drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return error;
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da3_node_remove(state, drop_blk);
	xfs_da3_fixhashpath(state, &state->path);
	error = xfs_da3_root_join(state, &state->path.blk[0]);
	return error;
}
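
/*
 * Illustrative usage (editor's addition, not part of the original file):
 * removal paths typically build state->path via a lookup, delete the leaf
 * entry, and then let xfs_da3_join() collapse any blocks that became too
 * small.  The sequencing here is an assumed sketch.
 *
 *	error = xfs_da3_node_lookup_int(state, &retval);
 *	...					// remove the leaf entry
 *	error = xfs_da3_join(state);
 */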

#ifdef	DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */

/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da3_root_join(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*root_blk)
{
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_args	*args;
	xfs_dablk_t		child;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr oldroothdr;
	struct xfs_da_node_entry *btree;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_root_join(state->args);

	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);

	args = state->args;
	oldroot = root_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
	ASSERT(oldroothdr.forw == 0);
	ASSERT(oldroothdr.back == 0);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (oldroothdr.count > 1)
		return 0;

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	btree = dp->d_ops->node_tree_p(oldroot);
	child = be32_to_cpu(btree[0].before);
	ASSERT(child != 0);
	error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
				  args->whichfork);
	if (error)
		return error;
	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);

	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the b_ops pointer as well to match the buffer type change
	 * that could occur. For dir3 blocks we also need to update the block
	 * number in the buffer header.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
	root_blk->bp->b_ops = bp->b_ops;
	xfs_trans_buf_copy_type(root_blk->bp, bp);
	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
	}
	xfs_trans_log_buf(args->trans, root_blk->bp, 0,
			  args->geo->blksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return error;
}

/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da3_node_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		blkno;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr nodehdr;
	int			count;
	int			forward;
	int			error;
	int			retval;
	int			i;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->b_addr;
	node = (xfs_da_intnode_t *)info;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return 0;	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (nodehdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
					   0, &retval);
		if (error)
			return error;
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return 0;
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	count  = state->args->geo->node_ents;
	count -= state->args->geo->node_ents >> 2;
	count -= nodehdr.count;

	/* start with smaller blk num */
	forward = nodehdr.forw < nodehdr.back;
	for (i = 0; i < 2; forward = !forward, i++) {
		struct xfs_da3_icnode_hdr thdr;
		if (forward)
			blkno = nodehdr.forw;
		else
			blkno = nodehdr.back;
		if (blkno == 0)
			continue;
		error = xfs_da3_node_read(state->args->trans, dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return error;

		node = bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&thdr, node);
		xfs_trans_brelse(state->args->trans, bp);

		if (count - thdr.count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return 0;
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da3_path_shift(state, &state->altpath, forward,
					   0, &retval);
	} else {
		error = xfs_da3_path_shift(state, &state->path, forward,
					   0, &retval);
	}
	if (error)
		return error;
	if (retval) {
		*action = 0;
		return 0;
	}
	*action = 1;
	return 0;
}
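
/*
 * Worked example (editor's addition, not part of the original file): with
 * an assumed geo->node_ents of 16 and nodehdr.count == 5, the coalesce
 * budget is count = 16 - (16 >> 2) - 5 = 7, so a sibling fits only if it
 * holds at most 7 entries; the merged node then has at most 12 entries,
 * leaving 16 >> 2 == 4 slots (25%) to spare.
 */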

/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da_intnode	 *node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	btree = dp->d_ops->node_tree_p(node);
	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da3_fixhashpath(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	xfs_dahash_t		lasthash = 0;
	int			level;
	int			count;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_fixhashpath(state->args);

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		struct xfs_da3_icnode_hdr nodehdr;

		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);
		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree[blk->index].hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, &btree[blk->index],
						  sizeof(*btree)));

		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
	}
}
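
/*
 * Illustrative usage (editor's addition, not part of the original file):
 * callers such as xfs_da3_split() above invoke this after changing a
 * child's last hashval so the new value propagates toward the root:
 *
 *	xfs_da3_fixhashpath(state, &state->path);
 */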

/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da3_node_remove(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			index;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	ASSERT(drop_blk->index < nodehdr.count);
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	index = drop_blk->index;
	btree = dp->d_ops->node_tree_p(node);
	if (index < nodehdr.count - 1) {
		tmp  = nodehdr.count - index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(&btree[index], &btree[index + 1], tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
		index = nodehdr.count - 1;
	}
	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
	nodehdr.count -= 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}

/*
 * Unbalance the elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da3_node_unbalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_intnode	*drop_node;
	struct xfs_da_intnode	*save_node;
	struct xfs_da_node_entry *drop_btree;
	struct xfs_da_node_entry *save_btree;
	struct xfs_da3_icnode_hdr drop_hdr;
	struct xfs_da3_icnode_hdr save_hdr;
	struct xfs_trans	*tp;
	int			sindex;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->b_addr;
	save_node = save_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
	dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
	drop_btree = dp->d_ops->node_tree_p(drop_node);
	save_btree = dp->d_ops->node_tree_p(save_node);
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_btree[0].hashval) <
			be32_to_cpu(save_btree[0].hashval)) ||
	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
		/* XXX: check this - is memmove dst correct? */
		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);

		sindex = 0;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[0],
				(save_hdr.count + drop_hdr.count) *
						sizeof(xfs_da_node_entry_t)));
	} else {
		sindex = save_hdr.count;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
	save_hdr.count += drop_hdr.count;

	dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
	xfs_trans_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
				dp->d_ops->node_hdr_size));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
  1315. int /* error */
  1316. xfs_da3_node_lookup_int(
  1317. struct xfs_da_state *state,
  1318. int *result)
  1319. {
  1320. struct xfs_da_state_blk *blk;
  1321. struct xfs_da_blkinfo *curr;
  1322. struct xfs_da_intnode *node;
  1323. struct xfs_da_node_entry *btree;
  1324. struct xfs_da3_icnode_hdr nodehdr;
  1325. struct xfs_da_args *args;
  1326. xfs_dablk_t blkno;
  1327. xfs_dahash_t hashval;
  1328. xfs_dahash_t btreehashval;
  1329. int probe;
  1330. int span;
  1331. int max;
  1332. int error;
  1333. int retval;
  1334. struct xfs_inode *dp = state->args->dp;
  1335. args = state->args;
  1336. /*
  1337. * Descend thru the B-tree searching each level for the right
  1338. * node to use, until the right hashval is found.
  1339. */
  1340. blkno = (args->whichfork == XFS_DATA_FORK)? args->geo->leafblk : 0;
  1341. for (blk = &state->path.blk[0], state->path.active = 1;
  1342. state->path.active <= XFS_DA_NODE_MAXDEPTH;
  1343. blk++, state->path.active++) {
  1344. /*
  1345. * Read the next node down in the tree.
  1346. */
  1347. blk->blkno = blkno;
  1348. error = xfs_da3_node_read(args->trans, args->dp, blkno,
  1349. -1, &blk->bp, args->whichfork);
  1350. if (error) {
  1351. blk->blkno = 0;
  1352. state->path.active--;
  1353. return error;
  1354. }
  1355. curr = blk->bp->b_addr;
  1356. blk->magic = be16_to_cpu(curr->magic);
  1357. if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
  1358. blk->magic == XFS_ATTR3_LEAF_MAGIC) {
  1359. blk->magic = XFS_ATTR_LEAF_MAGIC;
  1360. blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
  1361. break;
  1362. }
  1363. if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
  1364. blk->magic == XFS_DIR3_LEAFN_MAGIC) {
  1365. blk->magic = XFS_DIR2_LEAFN_MAGIC;
  1366. blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
  1367. blk->bp, NULL);
  1368. break;
  1369. }
  1370. blk->magic = XFS_DA_NODE_MAGIC;
  1371. /*
  1372. * Search an intermediate node for a match.
  1373. */
  1374. node = blk->bp->b_addr;
  1375. dp->d_ops->node_hdr_from_disk(&nodehdr, node);
  1376. btree = dp->d_ops->node_tree_p(node);
  1377. max = nodehdr.count;
  1378. blk->hashval = be32_to_cpu(btree[max - 1].hashval);
  1379. /*
  1380. * Binary search. (note: small blocks will skip loop)
  1381. */
  1382. probe = span = max / 2;
  1383. hashval = args->hashval;
  1384. while (span > 4) {
  1385. span /= 2;
  1386. btreehashval = be32_to_cpu(btree[probe].hashval);
  1387. if (btreehashval < hashval)
  1388. probe += span;
  1389. else if (btreehashval > hashval)
  1390. probe -= span;
  1391. else
  1392. break;
  1393. }
  1394. ASSERT((probe >= 0) && (probe < max));
  1395. ASSERT((span <= 4) ||
  1396. (be32_to_cpu(btree[probe].hashval) == hashval));
  1397. /*
1398. * Since we may have duplicate hashvals, find the first
  1399. * matching hashval in the node.
  1400. */
  1401. while (probe > 0 &&
  1402. be32_to_cpu(btree[probe].hashval) >= hashval) {
  1403. probe--;
  1404. }
  1405. while (probe < max &&
  1406. be32_to_cpu(btree[probe].hashval) < hashval) {
  1407. probe++;
  1408. }
  1409. /*
  1410. * Pick the right block to descend on.
  1411. */
  1412. if (probe == max) {
  1413. blk->index = max - 1;
  1414. blkno = be32_to_cpu(btree[max - 1].before);
  1415. } else {
  1416. blk->index = probe;
  1417. blkno = be32_to_cpu(btree[probe].before);
  1418. }
  1419. }
  1420. /*
  1421. * A leaf block that ends in the hashval that we are interested in
  1422. * (final hashval == search hashval) means that the next block may
1423. * contain more entries with the same hashval, so shift forward to the
1424. * next leaf and keep searching.
  1425. */
  1426. for (;;) {
  1427. if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
  1428. retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
  1429. &blk->index, state);
  1430. } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
  1431. retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
  1432. blk->index = args->index;
  1433. args->blkno = blk->blkno;
  1434. } else {
  1435. ASSERT(0);
  1436. return -EFSCORRUPTED;
  1437. }
  1438. if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
  1439. (blk->hashval == args->hashval)) {
  1440. error = xfs_da3_path_shift(state, &state->path, 1, 1,
  1441. &retval);
  1442. if (error)
  1443. return error;
  1444. if (retval == 0) {
  1445. continue;
  1446. } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
  1447. /* path_shift() gives ENOENT */
  1448. retval = -ENOATTR;
  1449. }
  1450. }
  1451. break;
  1452. }
  1453. *result = retval;
  1454. return 0;
  1455. }
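/*
 * Illustrative sketch only -- not part of xfs_da_btree.c.  The probe logic of
 * xfs_da3_node_lookup_int() above in standalone form: a coarse binary search
 * narrows the span, then short linear scans settle on the first entry whose
 * hashval is >= the wanted value, which is what duplicate hashvals require.
 */
#include <stdint.h>

static int
demo_first_ge(
	const uint32_t	*hashval,	/* sorted ascending */
	int		count,
	uint32_t	want)
{
	int		probe;
	int		span;

	probe = span = count / 2;
	while (span > 4) {
		span /= 2;
		if (hashval[probe] < want)
			probe += span;
		else if (hashval[probe] > want)
			probe -= span;
		else
			break;
	}
	while (probe > 0 && hashval[probe] >= want)
		probe--;
	while (probe < count && hashval[probe] < want)
		probe++;
	return probe;		/* == count if every hashval is < want */
}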
  1456. /*========================================================================
  1457. * Utility routines.
  1458. *========================================================================*/
  1459. /*
  1460. * Compare two intermediate nodes for "order".
  1461. */
  1462. STATIC int
  1463. xfs_da3_node_order(
  1464. struct xfs_inode *dp,
  1465. struct xfs_buf *node1_bp,
  1466. struct xfs_buf *node2_bp)
  1467. {
  1468. struct xfs_da_intnode *node1;
  1469. struct xfs_da_intnode *node2;
  1470. struct xfs_da_node_entry *btree1;
  1471. struct xfs_da_node_entry *btree2;
  1472. struct xfs_da3_icnode_hdr node1hdr;
  1473. struct xfs_da3_icnode_hdr node2hdr;
  1474. node1 = node1_bp->b_addr;
  1475. node2 = node2_bp->b_addr;
  1476. dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
  1477. dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
  1478. btree1 = dp->d_ops->node_tree_p(node1);
  1479. btree2 = dp->d_ops->node_tree_p(node2);
  1480. if (node1hdr.count > 0 && node2hdr.count > 0 &&
  1481. ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
  1482. (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
  1483. be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
  1484. return 1;
  1485. }
  1486. return 0;
  1487. }
  1488. /*
  1489. * Link a new block into a doubly linked list of blocks (of whatever type).
  1490. */
  1491. int /* error */
  1492. xfs_da3_blk_link(
  1493. struct xfs_da_state *state,
  1494. struct xfs_da_state_blk *old_blk,
  1495. struct xfs_da_state_blk *new_blk)
  1496. {
  1497. struct xfs_da_blkinfo *old_info;
  1498. struct xfs_da_blkinfo *new_info;
  1499. struct xfs_da_blkinfo *tmp_info;
  1500. struct xfs_da_args *args;
  1501. struct xfs_buf *bp;
  1502. int before = 0;
  1503. int error;
  1504. struct xfs_inode *dp = state->args->dp;
  1505. /*
  1506. * Set up environment.
  1507. */
  1508. args = state->args;
  1509. ASSERT(args != NULL);
  1510. old_info = old_blk->bp->b_addr;
  1511. new_info = new_blk->bp->b_addr;
  1512. ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
  1513. old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
  1514. old_blk->magic == XFS_ATTR_LEAF_MAGIC);
  1515. switch (old_blk->magic) {
  1516. case XFS_ATTR_LEAF_MAGIC:
  1517. before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
  1518. break;
  1519. case XFS_DIR2_LEAFN_MAGIC:
  1520. before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
  1521. break;
  1522. case XFS_DA_NODE_MAGIC:
  1523. before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
  1524. break;
  1525. }
  1526. /*
  1527. * Link blocks in appropriate order.
  1528. */
  1529. if (before) {
  1530. /*
  1531. * Link new block in before existing block.
  1532. */
  1533. trace_xfs_da_link_before(args);
  1534. new_info->forw = cpu_to_be32(old_blk->blkno);
  1535. new_info->back = old_info->back;
  1536. if (old_info->back) {
  1537. error = xfs_da3_node_read(args->trans, dp,
  1538. be32_to_cpu(old_info->back),
  1539. -1, &bp, args->whichfork);
  1540. if (error)
  1541. return error;
  1542. ASSERT(bp != NULL);
  1543. tmp_info = bp->b_addr;
  1544. ASSERT(tmp_info->magic == old_info->magic);
  1545. ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
  1546. tmp_info->forw = cpu_to_be32(new_blk->blkno);
  1547. xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
  1548. }
  1549. old_info->back = cpu_to_be32(new_blk->blkno);
  1550. } else {
  1551. /*
  1552. * Link new block in after existing block.
  1553. */
  1554. trace_xfs_da_link_after(args);
  1555. new_info->forw = old_info->forw;
  1556. new_info->back = cpu_to_be32(old_blk->blkno);
  1557. if (old_info->forw) {
  1558. error = xfs_da3_node_read(args->trans, dp,
  1559. be32_to_cpu(old_info->forw),
  1560. -1, &bp, args->whichfork);
  1561. if (error)
  1562. return error;
  1563. ASSERT(bp != NULL);
  1564. tmp_info = bp->b_addr;
  1565. ASSERT(tmp_info->magic == old_info->magic);
  1566. ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
  1567. tmp_info->back = cpu_to_be32(new_blk->blkno);
  1568. xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
  1569. }
  1570. old_info->forw = cpu_to_be32(new_blk->blkno);
  1571. }
  1572. xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
  1573. xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
  1574. return 0;
  1575. }
  1576. /*
  1577. * Unlink a block from a doubly linked list of blocks.
  1578. */
  1579. STATIC int /* error */
  1580. xfs_da3_blk_unlink(
  1581. struct xfs_da_state *state,
  1582. struct xfs_da_state_blk *drop_blk,
  1583. struct xfs_da_state_blk *save_blk)
  1584. {
  1585. struct xfs_da_blkinfo *drop_info;
  1586. struct xfs_da_blkinfo *save_info;
  1587. struct xfs_da_blkinfo *tmp_info;
  1588. struct xfs_da_args *args;
  1589. struct xfs_buf *bp;
  1590. int error;
  1591. /*
  1592. * Set up environment.
  1593. */
  1594. args = state->args;
  1595. ASSERT(args != NULL);
  1596. save_info = save_blk->bp->b_addr;
  1597. drop_info = drop_blk->bp->b_addr;
  1598. ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
  1599. save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
  1600. save_blk->magic == XFS_ATTR_LEAF_MAGIC);
  1601. ASSERT(save_blk->magic == drop_blk->magic);
  1602. ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
  1603. (be32_to_cpu(save_info->back) == drop_blk->blkno));
  1604. ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
  1605. (be32_to_cpu(drop_info->back) == save_blk->blkno));
  1606. /*
  1607. * Unlink the leaf block from the doubly linked chain of leaves.
  1608. */
  1609. if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
  1610. trace_xfs_da_unlink_back(args);
  1611. save_info->back = drop_info->back;
  1612. if (drop_info->back) {
  1613. error = xfs_da3_node_read(args->trans, args->dp,
  1614. be32_to_cpu(drop_info->back),
  1615. -1, &bp, args->whichfork);
  1616. if (error)
  1617. return error;
  1618. ASSERT(bp != NULL);
  1619. tmp_info = bp->b_addr;
  1620. ASSERT(tmp_info->magic == save_info->magic);
  1621. ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
  1622. tmp_info->forw = cpu_to_be32(save_blk->blkno);
  1623. xfs_trans_log_buf(args->trans, bp, 0,
  1624. sizeof(*tmp_info) - 1);
  1625. }
  1626. } else {
  1627. trace_xfs_da_unlink_forward(args);
  1628. save_info->forw = drop_info->forw;
  1629. if (drop_info->forw) {
  1630. error = xfs_da3_node_read(args->trans, args->dp,
  1631. be32_to_cpu(drop_info->forw),
  1632. -1, &bp, args->whichfork);
  1633. if (error)
  1634. return error;
  1635. ASSERT(bp != NULL);
  1636. tmp_info = bp->b_addr;
  1637. ASSERT(tmp_info->magic == save_info->magic);
  1638. ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
  1639. tmp_info->back = cpu_to_be32(save_blk->blkno);
  1640. xfs_trans_log_buf(args->trans, bp, 0,
  1641. sizeof(*tmp_info) - 1);
  1642. }
  1643. }
  1644. xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
  1645. return 0;
  1646. }
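/*
 * Illustrative sketch only -- not part of xfs_da_btree.c.  The sibling chain
 * links blocks by block number, not by pointer, so unlinking a block means
 * copying its link into the survivor and then re-reading the far neighbour so
 * it points back at the survivor.  demo_read_block() below is a hypothetical
 * stand-in for xfs_da3_node_read(); only the "drop sits behind save" case of
 * xfs_da3_blk_unlink() is shown.
 */
#include <stdint.h>
#include <stddef.h>

struct demo_blkinfo {
	uint32_t	forw;	/* next sibling block number, 0 if none */
	uint32_t	back;	/* previous sibling block number, 0 if none */
};

/* hypothetical helper: return the in-memory copy of a block, NULL on error */
extern struct demo_blkinfo *demo_read_block(uint32_t blkno);

static int
demo_unlink_back(
	struct demo_blkinfo	*save,
	uint32_t		save_blkno,
	const struct demo_blkinfo *drop)
{
	save->back = drop->back;
	if (drop->back != 0) {
		struct demo_blkinfo *far = demo_read_block(drop->back);

		if (far == NULL)
			return -1;
		far->forw = save_blkno;	/* far neighbour now skips "drop" */
	}
	return 0;
}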
  1647. /*
  1648. * Move a path "forward" or "!forward" one block at the current level.
  1649. *
  1650. * This routine will adjust a "path" to point to the next block
  1651. * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
  1652. * Btree, including updating pointers to the intermediate nodes between
  1653. * the new bottom and the root.
  1654. */
  1655. int /* error */
  1656. xfs_da3_path_shift(
  1657. struct xfs_da_state *state,
  1658. struct xfs_da_state_path *path,
  1659. int forward,
  1660. int release,
  1661. int *result)
  1662. {
  1663. struct xfs_da_state_blk *blk;
  1664. struct xfs_da_blkinfo *info;
  1665. struct xfs_da_intnode *node;
  1666. struct xfs_da_args *args;
  1667. struct xfs_da_node_entry *btree;
  1668. struct xfs_da3_icnode_hdr nodehdr;
  1669. struct xfs_buf *bp;
  1670. xfs_dablk_t blkno = 0;
  1671. int level;
  1672. int error;
  1673. struct xfs_inode *dp = state->args->dp;
  1674. trace_xfs_da_path_shift(state->args);
  1675. /*
  1676. * Roll up the Btree looking for the first block where our
  1677. * current index is not at the edge of the block. Note that
  1678. * we skip the bottom layer because we want the sibling block.
  1679. */
  1680. args = state->args;
  1681. ASSERT(args != NULL);
  1682. ASSERT(path != NULL);
  1683. ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
  1684. level = (path->active-1) - 1; /* skip bottom layer in path */
  1685. for (blk = &path->blk[level]; level >= 0; blk--, level--) {
  1686. node = blk->bp->b_addr;
  1687. dp->d_ops->node_hdr_from_disk(&nodehdr, node);
  1688. btree = dp->d_ops->node_tree_p(node);
  1689. if (forward && (blk->index < nodehdr.count - 1)) {
  1690. blk->index++;
  1691. blkno = be32_to_cpu(btree[blk->index].before);
  1692. break;
  1693. } else if (!forward && (blk->index > 0)) {
  1694. blk->index--;
  1695. blkno = be32_to_cpu(btree[blk->index].before);
  1696. break;
  1697. }
  1698. }
  1699. if (level < 0) {
  1700. *result = -ENOENT; /* we're out of our tree */
  1701. ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
  1702. return 0;
  1703. }
  1704. /*
  1705. * Roll down the edge of the subtree until we reach the
  1706. * same depth we were at originally.
  1707. */
  1708. for (blk++, level++; level < path->active; blk++, level++) {
  1709. /*
  1710. * Read the next child block into a local buffer.
  1711. */
  1712. error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
  1713. args->whichfork);
  1714. if (error)
  1715. return error;
  1716. /*
  1717. * Release the old block (if it's dirty, the trans doesn't
  1718. * actually let go) and swap the local buffer into the path
  1719. * structure. This ensures failure of the above read doesn't set
  1720. * a NULL buffer in an active slot in the path.
  1721. */
  1722. if (release)
  1723. xfs_trans_brelse(args->trans, blk->bp);
  1724. blk->blkno = blkno;
  1725. blk->bp = bp;
  1726. info = blk->bp->b_addr;
  1727. ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
  1728. info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
  1729. info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
  1730. info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
  1731. info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
  1732. info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
  1733. /*
  1734. * Note: we flatten the magic number to a single type so we
  1735. * don't have to compare against crc/non-crc types elsewhere.
  1736. */
  1737. switch (be16_to_cpu(info->magic)) {
  1738. case XFS_DA_NODE_MAGIC:
  1739. case XFS_DA3_NODE_MAGIC:
  1740. blk->magic = XFS_DA_NODE_MAGIC;
  1741. node = (xfs_da_intnode_t *)info;
  1742. dp->d_ops->node_hdr_from_disk(&nodehdr, node);
  1743. btree = dp->d_ops->node_tree_p(node);
  1744. blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
  1745. if (forward)
  1746. blk->index = 0;
  1747. else
  1748. blk->index = nodehdr.count - 1;
  1749. blkno = be32_to_cpu(btree[blk->index].before);
  1750. break;
  1751. case XFS_ATTR_LEAF_MAGIC:
  1752. case XFS_ATTR3_LEAF_MAGIC:
  1753. blk->magic = XFS_ATTR_LEAF_MAGIC;
  1754. ASSERT(level == path->active-1);
  1755. blk->index = 0;
  1756. blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
  1757. break;
  1758. case XFS_DIR2_LEAFN_MAGIC:
  1759. case XFS_DIR3_LEAFN_MAGIC:
  1760. blk->magic = XFS_DIR2_LEAFN_MAGIC;
  1761. ASSERT(level == path->active-1);
  1762. blk->index = 0;
  1763. blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
  1764. blk->bp, NULL);
  1765. break;
  1766. default:
  1767. ASSERT(0);
  1768. break;
  1769. }
  1770. }
  1771. *result = 0;
  1772. return 0;
  1773. }
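/*
 * Illustrative sketch only -- not part of xfs_da_btree.c.  The shift in
 * xfs_da3_path_shift() above, reduced to per-level indices and entry counts:
 * walk up to the first level whose index is not already at the edge, step it
 * sideways, then walk back down taking the first (forward) or last (!forward)
 * child at every deeper level.  The real function also re-reads each block
 * and special-cases the leaf level.
 */
static int
demo_path_shift(
	int		*index,		/* index[0] is the root level */
	const int	*count,		/* entries per level */
	int		depth,		/* number of active levels */
	int		forward)
{
	int		level;

	/* skip the bottom level; we want a sibling of it */
	for (level = depth - 2; level >= 0; level--) {
		if (forward && index[level] < count[level] - 1) {
			index[level]++;
			break;
		}
		if (!forward && index[level] > 0) {
			index[level]--;
			break;
		}
	}
	if (level < 0)
		return -1;		/* already at the edge of the tree */

	for (level++; level < depth; level++)
		index[level] = forward ? 0 : count[level] - 1;
	return 0;
}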
  1774. /*========================================================================
  1775. * Utility routines.
  1776. *========================================================================*/
  1777. /*
  1778. * Implement a simple hash on a character string.
  1779. * Rotate the hash value by 7 bits, then XOR each character in.
  1780. * This is implemented with some source-level loop unrolling.
  1781. */
  1782. xfs_dahash_t
  1783. xfs_da_hashname(const __uint8_t *name, int namelen)
  1784. {
  1785. xfs_dahash_t hash;
  1786. /*
  1787. * Do four characters at a time as long as we can.
  1788. */
  1789. for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
  1790. hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
  1791. (name[3] << 0) ^ rol32(hash, 7 * 4);
  1792. /*
  1793. * Now do the rest of the characters.
  1794. */
  1795. switch (namelen) {
  1796. case 3:
  1797. return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
  1798. rol32(hash, 7 * 3);
  1799. case 2:
  1800. return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
  1801. case 1:
  1802. return (name[0] << 0) ^ rol32(hash, 7 * 1);
  1803. default: /* case 0: */
  1804. return hash;
  1805. }
  1806. }
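/*
 * Illustrative sketch only -- not part of xfs_da_btree.c.  The unrolled loop
 * in xfs_da_hashname() above amounts to folding the name in one byte at a
 * time, rotating the accumulated hash left by 7 bits before each XOR:
 */
#include <stdint.h>

static inline uint32_t
demo_rol32(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

static uint32_t
demo_hashname(const uint8_t *name, int namelen)
{
	uint32_t	hash = 0;

	while (namelen-- > 0)
		hash = *name++ ^ demo_rol32(hash, 7);
	return hash;
}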
  1807. enum xfs_dacmp
  1808. xfs_da_compname(
  1809. struct xfs_da_args *args,
  1810. const unsigned char *name,
  1811. int len)
  1812. {
  1813. return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
  1814. XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
  1815. }
  1816. static xfs_dahash_t
  1817. xfs_default_hashname(
  1818. struct xfs_name *name)
  1819. {
  1820. return xfs_da_hashname(name->name, name->len);
  1821. }
  1822. const struct xfs_nameops xfs_default_nameops = {
  1823. .hashname = xfs_default_hashname,
  1824. .compname = xfs_da_compname
  1825. };
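/*
 * Illustrative sketch only -- not part of xfs_da_btree.c.  The two nameops
 * travel together because lookups key on the hash first and only fall back to
 * a byte comparison when the hashes collide; something along these lines:
 */
#include <stdint.h>
#include <string.h>

static int
demo_name_matches(
	const uint8_t	*want,
	int		want_len,
	uint32_t	want_hash,
	const uint8_t	*cand,
	int		cand_len,
	uint32_t	cand_hash)
{
	if (cand_hash != want_hash)
		return 0;		/* cheap reject, no byte compare needed */
	return cand_len == want_len &&
	       memcmp(cand, want, want_len) == 0;
}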
  1826. int
  1827. xfs_da_grow_inode_int(
  1828. struct xfs_da_args *args,
  1829. xfs_fileoff_t *bno,
  1830. int count)
  1831. {
  1832. struct xfs_trans *tp = args->trans;
  1833. struct xfs_inode *dp = args->dp;
  1834. int w = args->whichfork;
  1835. xfs_rfsblock_t nblks = dp->i_d.di_nblocks;
  1836. struct xfs_bmbt_irec map, *mapp;
  1837. int nmap, error, got, i, mapi;
  1838. /*
  1839. * Find a spot in the file space to put the new block.
  1840. */
  1841. error = xfs_bmap_first_unused(tp, dp, count, bno, w);
  1842. if (error)
  1843. return error;
  1844. /*
  1845. * Try mapping it in one filesystem block.
  1846. */
  1847. nmap = 1;
  1848. ASSERT(args->firstblock != NULL);
  1849. error = xfs_bmapi_write(tp, dp, *bno, count,
  1850. xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
  1851. args->firstblock, args->total, &map, &nmap,
  1852. args->flist);
  1853. if (error)
  1854. return error;
  1855. ASSERT(nmap <= 1);
  1856. if (nmap == 1) {
  1857. mapp = &map;
  1858. mapi = 1;
  1859. } else if (nmap == 0 && count > 1) {
  1860. xfs_fileoff_t b;
  1861. int c;
  1862. /*
  1863. * If we didn't get it and the block might work if fragmented,
  1864. * try without the CONTIG flag. Loop until we get it all.
  1865. */
  1866. mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
  1867. for (b = *bno, mapi = 0; b < *bno + count; ) {
  1868. nmap = MIN(XFS_BMAP_MAX_NMAP, count);
  1869. c = (int)(*bno + count - b);
  1870. error = xfs_bmapi_write(tp, dp, b, c,
  1871. xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
  1872. args->firstblock, args->total,
  1873. &mapp[mapi], &nmap, args->flist);
  1874. if (error)
  1875. goto out_free_map;
  1876. if (nmap < 1)
  1877. break;
  1878. mapi += nmap;
  1879. b = mapp[mapi - 1].br_startoff +
  1880. mapp[mapi - 1].br_blockcount;
  1881. }
  1882. } else {
  1883. mapi = 0;
  1884. mapp = NULL;
  1885. }
  1886. /*
  1887. * Count the blocks we got, make sure it matches the total.
  1888. */
  1889. for (i = 0, got = 0; i < mapi; i++)
  1890. got += mapp[i].br_blockcount;
  1891. if (got != count || mapp[0].br_startoff != *bno ||
  1892. mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
  1893. *bno + count) {
  1894. error = -ENOSPC;
  1895. goto out_free_map;
  1896. }
  1897. /* account for newly allocated blocks in reserved blocks total */
  1898. args->total -= dp->i_d.di_nblocks - nblks;
  1899. out_free_map:
  1900. if (mapp != &map)
  1901. kmem_free(mapp);
  1902. return error;
  1903. }
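/*
 * Illustrative sketch only -- not part of xfs_da_btree.c.  The allocation
 * strategy of xfs_da_grow_inode_int() above, with the bmap machinery replaced
 * by a hypothetical demo_alloc() helper: first try the whole range as one
 * contiguous mapping (assumed to map all or nothing, like XFS_BMAPI_CONTIG),
 * and only if that fails map the range piecewise until it is covered or space
 * runs out.
 */
#include <stdint.h>

struct demo_map {
	uint64_t	startoff;
	uint64_t	blockcount;
};

/* hypothetical allocator: map up to "len" blocks at "off", return blocks mapped */
extern uint64_t demo_alloc(uint64_t off, uint64_t len, int contig,
			   struct demo_map *map);

static int
demo_grow(
	uint64_t	bno,
	uint64_t	count,
	struct demo_map	*maps,
	int		max_maps,
	int		*nmaps)
{
	uint64_t	off = bno;

	*nmaps = 0;
	if (demo_alloc(bno, count, 1, &maps[0]) == count) {
		*nmaps = 1;
		return 0;
	}
	while (off < bno + count && *nmaps < max_maps) {
		uint64_t got = demo_alloc(off, bno + count - off, 0,
					  &maps[*nmaps]);

		if (got == 0)
			break;
		off = maps[*nmaps].startoff + maps[*nmaps].blockcount;
		(*nmaps)++;
	}
	return off == bno + count ? 0 : -1;	/* -1: range not fully covered */
}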
  1904. /*
  1905. * Add a block to the btree ahead of the file.
  1906. * Return the new block number to the caller.
  1907. */
  1908. int
  1909. xfs_da_grow_inode(
  1910. struct xfs_da_args *args,
  1911. xfs_dablk_t *new_blkno)
  1912. {
  1913. xfs_fileoff_t bno;
  1914. int error;
  1915. trace_xfs_da_grow_inode(args);
  1916. bno = args->geo->leafblk;
  1917. error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
  1918. if (!error)
  1919. *new_blkno = (xfs_dablk_t)bno;
  1920. return error;
  1921. }
  1922. /*
  1923. * Ick. We need to always be able to remove a btree block, even
  1924. * if there's no space reservation because the filesystem is full.
  1925. * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
  1926. * It swaps the target block with the last block in the file. The
1927. * last block in the file can always be removed, since removing it
1928. * cannot cause a bmap btree split.
  1929. */
  1930. STATIC int
  1931. xfs_da3_swap_lastblock(
  1932. struct xfs_da_args *args,
  1933. xfs_dablk_t *dead_blknop,
  1934. struct xfs_buf **dead_bufp)
  1935. {
  1936. struct xfs_da_blkinfo *dead_info;
  1937. struct xfs_da_blkinfo *sib_info;
  1938. struct xfs_da_intnode *par_node;
  1939. struct xfs_da_intnode *dead_node;
  1940. struct xfs_dir2_leaf *dead_leaf2;
  1941. struct xfs_da_node_entry *btree;
  1942. struct xfs_da3_icnode_hdr par_hdr;
  1943. struct xfs_inode *dp;
  1944. struct xfs_trans *tp;
  1945. struct xfs_mount *mp;
  1946. struct xfs_buf *dead_buf;
  1947. struct xfs_buf *last_buf;
  1948. struct xfs_buf *sib_buf;
  1949. struct xfs_buf *par_buf;
  1950. xfs_dahash_t dead_hash;
  1951. xfs_fileoff_t lastoff;
  1952. xfs_dablk_t dead_blkno;
  1953. xfs_dablk_t last_blkno;
  1954. xfs_dablk_t sib_blkno;
  1955. xfs_dablk_t par_blkno;
  1956. int error;
  1957. int w;
  1958. int entno;
  1959. int level;
  1960. int dead_level;
  1961. trace_xfs_da_swap_lastblock(args);
  1962. dead_buf = *dead_bufp;
  1963. dead_blkno = *dead_blknop;
  1964. tp = args->trans;
  1965. dp = args->dp;
  1966. w = args->whichfork;
  1967. ASSERT(w == XFS_DATA_FORK);
  1968. mp = dp->i_mount;
  1969. lastoff = args->geo->freeblk;
  1970. error = xfs_bmap_last_before(tp, dp, &lastoff, w);
  1971. if (error)
  1972. return error;
  1973. if (unlikely(lastoff == 0)) {
  1974. XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
  1975. mp);
  1976. return -EFSCORRUPTED;
  1977. }
  1978. /*
  1979. * Read the last block in the btree space.
  1980. */
  1981. last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
  1982. error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
  1983. if (error)
  1984. return error;
  1985. /*
  1986. * Copy the last block into the dead buffer and log it.
  1987. */
  1988. memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
  1989. xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
  1990. dead_info = dead_buf->b_addr;
  1991. /*
  1992. * Get values from the moved block.
  1993. */
  1994. if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
  1995. dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
  1996. struct xfs_dir3_icleaf_hdr leafhdr;
  1997. struct xfs_dir2_leaf_entry *ents;
  1998. dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
  1999. dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
  2000. ents = dp->d_ops->leaf_ents_p(dead_leaf2);
  2001. dead_level = 0;
  2002. dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
  2003. } else {
  2004. struct xfs_da3_icnode_hdr deadhdr;
  2005. dead_node = (xfs_da_intnode_t *)dead_info;
  2006. dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
  2007. btree = dp->d_ops->node_tree_p(dead_node);
  2008. dead_level = deadhdr.level;
  2009. dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
  2010. }
  2011. sib_buf = par_buf = NULL;
  2012. /*
  2013. * If the moved block has a left sibling, fix up the pointers.
  2014. */
  2015. if ((sib_blkno = be32_to_cpu(dead_info->back))) {
  2016. error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
  2017. if (error)
  2018. goto done;
  2019. sib_info = sib_buf->b_addr;
  2020. if (unlikely(
  2021. be32_to_cpu(sib_info->forw) != last_blkno ||
  2022. sib_info->magic != dead_info->magic)) {
  2023. XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
  2024. XFS_ERRLEVEL_LOW, mp);
  2025. error = -EFSCORRUPTED;
  2026. goto done;
  2027. }
  2028. sib_info->forw = cpu_to_be32(dead_blkno);
  2029. xfs_trans_log_buf(tp, sib_buf,
  2030. XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
  2031. sizeof(sib_info->forw)));
  2032. sib_buf = NULL;
  2033. }
  2034. /*
  2035. * If the moved block has a right sibling, fix up the pointers.
  2036. */
  2037. if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
  2038. error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
  2039. if (error)
  2040. goto done;
  2041. sib_info = sib_buf->b_addr;
  2042. if (unlikely(
  2043. be32_to_cpu(sib_info->back) != last_blkno ||
  2044. sib_info->magic != dead_info->magic)) {
  2045. XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
  2046. XFS_ERRLEVEL_LOW, mp);
  2047. error = -EFSCORRUPTED;
  2048. goto done;
  2049. }
  2050. sib_info->back = cpu_to_be32(dead_blkno);
  2051. xfs_trans_log_buf(tp, sib_buf,
  2052. XFS_DA_LOGRANGE(sib_info, &sib_info->back,
  2053. sizeof(sib_info->back)));
  2054. sib_buf = NULL;
  2055. }
  2056. par_blkno = args->geo->leafblk;
  2057. level = -1;
  2058. /*
  2059. * Walk down the tree looking for the parent of the moved block.
  2060. */
  2061. for (;;) {
  2062. error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
  2063. if (error)
  2064. goto done;
  2065. par_node = par_buf->b_addr;
  2066. dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
  2067. if (level >= 0 && level != par_hdr.level + 1) {
  2068. XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
  2069. XFS_ERRLEVEL_LOW, mp);
  2070. error = -EFSCORRUPTED;
  2071. goto done;
  2072. }
  2073. level = par_hdr.level;
  2074. btree = dp->d_ops->node_tree_p(par_node);
  2075. for (entno = 0;
  2076. entno < par_hdr.count &&
  2077. be32_to_cpu(btree[entno].hashval) < dead_hash;
  2078. entno++)
  2079. continue;
  2080. if (entno == par_hdr.count) {
  2081. XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
  2082. XFS_ERRLEVEL_LOW, mp);
  2083. error = -EFSCORRUPTED;
  2084. goto done;
  2085. }
  2086. par_blkno = be32_to_cpu(btree[entno].before);
  2087. if (level == dead_level + 1)
  2088. break;
  2089. xfs_trans_brelse(tp, par_buf);
  2090. par_buf = NULL;
  2091. }
  2092. /*
  2093. * We're in the right parent block.
  2094. * Look for the right entry.
  2095. */
  2096. for (;;) {
  2097. for (;
  2098. entno < par_hdr.count &&
  2099. be32_to_cpu(btree[entno].before) != last_blkno;
  2100. entno++)
  2101. continue;
  2102. if (entno < par_hdr.count)
  2103. break;
  2104. par_blkno = par_hdr.forw;
  2105. xfs_trans_brelse(tp, par_buf);
  2106. par_buf = NULL;
  2107. if (unlikely(par_blkno == 0)) {
  2108. XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
  2109. XFS_ERRLEVEL_LOW, mp);
  2110. error = -EFSCORRUPTED;
  2111. goto done;
  2112. }
  2113. error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
  2114. if (error)
  2115. goto done;
  2116. par_node = par_buf->b_addr;
  2117. dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
  2118. if (par_hdr.level != level) {
  2119. XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
  2120. XFS_ERRLEVEL_LOW, mp);
  2121. error = -EFSCORRUPTED;
  2122. goto done;
  2123. }
  2124. btree = dp->d_ops->node_tree_p(par_node);
  2125. entno = 0;
  2126. }
  2127. /*
  2128. * Update the parent entry pointing to the moved block.
  2129. */
  2130. btree[entno].before = cpu_to_be32(dead_blkno);
  2131. xfs_trans_log_buf(tp, par_buf,
  2132. XFS_DA_LOGRANGE(par_node, &btree[entno].before,
  2133. sizeof(btree[entno].before)));
  2134. *dead_blknop = last_blkno;
  2135. *dead_bufp = last_buf;
  2136. return 0;
  2137. done:
  2138. if (par_buf)
  2139. xfs_trans_brelse(tp, par_buf);
  2140. if (sib_buf)
  2141. xfs_trans_brelse(tp, sib_buf);
  2142. xfs_trans_brelse(tp, last_buf);
  2143. return error;
  2144. }
  2145. /*
  2146. * Remove a btree block from a directory or attribute.
  2147. */
  2148. int
  2149. xfs_da_shrink_inode(
  2150. xfs_da_args_t *args,
  2151. xfs_dablk_t dead_blkno,
  2152. struct xfs_buf *dead_buf)
  2153. {
  2154. xfs_inode_t *dp;
  2155. int done, error, w, count;
  2156. xfs_trans_t *tp;
  2157. trace_xfs_da_shrink_inode(args);
  2158. dp = args->dp;
  2159. w = args->whichfork;
  2160. tp = args->trans;
  2161. count = args->geo->fsbcount;
  2162. for (;;) {
  2163. /*
  2164. * Remove extents. If we get ENOSPC for a dir we have to move
  2165. * the last block to the place we want to kill.
  2166. */
  2167. error = xfs_bunmapi(tp, dp, dead_blkno, count,
  2168. xfs_bmapi_aflag(w), 0, args->firstblock,
  2169. args->flist, &done);
  2170. if (error == -ENOSPC) {
  2171. if (w != XFS_DATA_FORK)
  2172. break;
  2173. error = xfs_da3_swap_lastblock(args, &dead_blkno,
  2174. &dead_buf);
  2175. if (error)
  2176. break;
  2177. } else {
  2178. break;
  2179. }
  2180. }
  2181. xfs_trans_binval(tp, dead_buf);
  2182. return error;
  2183. }
  2184. /*
  2185. * See if the mapping(s) for this btree block are valid, i.e.
  2186. * don't contain holes, are logically contiguous, and cover the whole range.
  2187. */
  2188. STATIC int
  2189. xfs_da_map_covers_blocks(
  2190. int nmap,
  2191. xfs_bmbt_irec_t *mapp,
  2192. xfs_dablk_t bno,
  2193. int count)
  2194. {
  2195. int i;
  2196. xfs_fileoff_t off;
  2197. for (i = 0, off = bno; i < nmap; i++) {
  2198. if (mapp[i].br_startblock == HOLESTARTBLOCK ||
  2199. mapp[i].br_startblock == DELAYSTARTBLOCK) {
  2200. return 0;
  2201. }
  2202. if (off != mapp[i].br_startoff) {
  2203. return 0;
  2204. }
  2205. off += mapp[i].br_blockcount;
  2206. }
  2207. return off == bno + count;
  2208. }
  2209. /*
  2210. * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
  2211. *
  2212. * For the single map case, it is assumed that the caller has provided a pointer
  2213. * to a valid xfs_buf_map. For the multiple map case, this function will
  2214. * allocate the xfs_buf_map to hold all the maps and replace the caller's single
  2215. * map pointer with the allocated map.
  2216. */
  2217. static int
  2218. xfs_buf_map_from_irec(
  2219. struct xfs_mount *mp,
  2220. struct xfs_buf_map **mapp,
  2221. int *nmaps,
  2222. struct xfs_bmbt_irec *irecs,
  2223. int nirecs)
  2224. {
  2225. struct xfs_buf_map *map;
  2226. int i;
  2227. ASSERT(*nmaps == 1);
  2228. ASSERT(nirecs >= 1);
  2229. if (nirecs > 1) {
  2230. map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
  2231. KM_SLEEP | KM_NOFS);
  2232. if (!map)
  2233. return -ENOMEM;
  2234. *mapp = map;
  2235. }
  2236. *nmaps = nirecs;
  2237. map = *mapp;
  2238. for (i = 0; i < *nmaps; i++) {
  2239. ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
  2240. irecs[i].br_startblock != HOLESTARTBLOCK);
  2241. map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
  2242. map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
  2243. }
  2244. return 0;
  2245. }
  2246. /*
  2247. * Map the block we are given ready for reading. There are three possible return
  2248. * values:
  2249. * -1 - will be returned if we land in a hole and mappedbno == -2 so the
  2250. * caller knows not to execute a subsequent read.
  2251. * 0 - if we mapped the block successfully
2252. * <0 - negative errno if there was an error.
  2253. */
  2254. static int
  2255. xfs_dabuf_map(
  2256. struct xfs_inode *dp,
  2257. xfs_dablk_t bno,
  2258. xfs_daddr_t mappedbno,
  2259. int whichfork,
  2260. struct xfs_buf_map **map,
  2261. int *nmaps)
  2262. {
  2263. struct xfs_mount *mp = dp->i_mount;
  2264. int nfsb;
  2265. int error = 0;
  2266. struct xfs_bmbt_irec irec;
  2267. struct xfs_bmbt_irec *irecs = &irec;
  2268. int nirecs;
  2269. ASSERT(map && *map);
  2270. ASSERT(*nmaps == 1);
  2271. if (whichfork == XFS_DATA_FORK)
  2272. nfsb = mp->m_dir_geo->fsbcount;
  2273. else
  2274. nfsb = mp->m_attr_geo->fsbcount;
  2275. /*
  2276. * Caller doesn't have a mapping. -2 means don't complain
  2277. * if we land in a hole.
  2278. */
  2279. if (mappedbno == -1 || mappedbno == -2) {
  2280. /*
  2281. * Optimize the one-block case.
  2282. */
  2283. if (nfsb != 1)
  2284. irecs = kmem_zalloc(sizeof(irec) * nfsb,
  2285. KM_SLEEP | KM_NOFS);
  2286. nirecs = nfsb;
  2287. error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
  2288. &nirecs, xfs_bmapi_aflag(whichfork));
  2289. if (error)
  2290. goto out;
  2291. } else {
  2292. irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
  2293. irecs->br_startoff = (xfs_fileoff_t)bno;
  2294. irecs->br_blockcount = nfsb;
  2295. irecs->br_state = 0;
  2296. nirecs = 1;
  2297. }
  2298. if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
  2299. error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
  2300. if (unlikely(error == -EFSCORRUPTED)) {
  2301. if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
  2302. int i;
  2303. xfs_alert(mp, "%s: bno %lld dir: inode %lld",
  2304. __func__, (long long)bno,
  2305. (long long)dp->i_ino);
  2306. for (i = 0; i < *nmaps; i++) {
  2307. xfs_alert(mp,
  2308. "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
  2309. i,
  2310. (long long)irecs[i].br_startoff,
  2311. (long long)irecs[i].br_startblock,
  2312. (long long)irecs[i].br_blockcount,
  2313. irecs[i].br_state);
  2314. }
  2315. }
  2316. XFS_ERROR_REPORT("xfs_da_do_buf(1)",
  2317. XFS_ERRLEVEL_LOW, mp);
  2318. }
  2319. goto out;
  2320. }
  2321. error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
  2322. out:
  2323. if (irecs != &irec)
  2324. kmem_free(irecs);
  2325. return error;
  2326. }
  2327. /*
  2328. * Get a buffer for the dir/attr block.
  2329. */
  2330. int
  2331. xfs_da_get_buf(
  2332. struct xfs_trans *trans,
  2333. struct xfs_inode *dp,
  2334. xfs_dablk_t bno,
  2335. xfs_daddr_t mappedbno,
  2336. struct xfs_buf **bpp,
  2337. int whichfork)
  2338. {
  2339. struct xfs_buf *bp;
  2340. struct xfs_buf_map map;
  2341. struct xfs_buf_map *mapp;
  2342. int nmap;
  2343. int error;
  2344. *bpp = NULL;
  2345. mapp = &map;
  2346. nmap = 1;
  2347. error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
  2348. &mapp, &nmap);
  2349. if (error) {
  2350. /* mapping a hole is not an error, but we don't continue */
  2351. if (error == -1)
  2352. error = 0;
  2353. goto out_free;
  2354. }
  2355. bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
  2356. mapp, nmap, 0);
  2357. error = bp ? bp->b_error : -EIO;
  2358. if (error) {
  2359. if (bp)
  2360. xfs_trans_brelse(trans, bp);
  2361. goto out_free;
  2362. }
  2363. *bpp = bp;
  2364. out_free:
  2365. if (mapp != &map)
  2366. kmem_free(mapp);
  2367. return error;
  2368. }
  2369. /*
  2370. * Get a buffer for the dir/attr block, fill in the contents.
  2371. */
  2372. int
  2373. xfs_da_read_buf(
  2374. struct xfs_trans *trans,
  2375. struct xfs_inode *dp,
  2376. xfs_dablk_t bno,
  2377. xfs_daddr_t mappedbno,
  2378. struct xfs_buf **bpp,
  2379. int whichfork,
  2380. const struct xfs_buf_ops *ops)
  2381. {
  2382. struct xfs_buf *bp;
  2383. struct xfs_buf_map map;
  2384. struct xfs_buf_map *mapp;
  2385. int nmap;
  2386. int error;
  2387. *bpp = NULL;
  2388. mapp = &map;
  2389. nmap = 1;
  2390. error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
  2391. &mapp, &nmap);
  2392. if (error) {
  2393. /* mapping a hole is not an error, but we don't continue */
  2394. if (error == -1)
  2395. error = 0;
  2396. goto out_free;
  2397. }
  2398. error = xfs_trans_read_buf_map(dp->i_mount, trans,
  2399. dp->i_mount->m_ddev_targp,
  2400. mapp, nmap, 0, &bp, ops);
  2401. if (error)
  2402. goto out_free;
  2403. if (whichfork == XFS_ATTR_FORK)
  2404. xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
  2405. else
  2406. xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
  2407. *bpp = bp;
  2408. out_free:
  2409. if (mapp != &map)
  2410. kmem_free(mapp);
  2411. return error;
  2412. }
  2413. /*
  2414. * Readahead the dir/attr block.
  2415. */
  2416. xfs_daddr_t
  2417. xfs_da_reada_buf(
  2418. struct xfs_inode *dp,
  2419. xfs_dablk_t bno,
  2420. xfs_daddr_t mappedbno,
  2421. int whichfork,
  2422. const struct xfs_buf_ops *ops)
  2423. {
  2424. struct xfs_buf_map map;
  2425. struct xfs_buf_map *mapp;
  2426. int nmap;
  2427. int error;
  2428. mapp = &map;
  2429. nmap = 1;
  2430. error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
  2431. &mapp, &nmap);
  2432. if (error) {
  2433. /* mapping a hole is not an error, but we don't continue */
  2434. if (error == -1)
  2435. error = 0;
  2436. goto out_free;
  2437. }
  2438. mappedbno = mapp[0].bm_bn;
  2439. xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);
  2440. out_free:
  2441. if (mapp != &map)
  2442. kmem_free(mapp);
  2443. if (error)
  2444. return -1;
  2445. return mappedbno;
  2446. }