/*
 * linux/fs/ext4/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
 *  and Andreas Gruenbacher <agruen@suse.de>.
 */

/*
 * Extended attributes are stored directly in inodes (on file systems with
 * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
 * field contains the block number if an inode uses an additional block. All
 * attributes must fit in the inode and one additional block. Blocks that
 * contain the identical set of attributes may be shared among several inodes.
 * Identical blocks are detected by keeping a cache of blocks that have
 * recently been accessed.
 *
 * The attributes in inodes and on blocks have a different header; the entries
 * are stored in the same format:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The header is followed by multiple entry descriptors. In disk blocks, the
 * entry descriptors are kept sorted. In inodes, they are unsorted. The
 * attribute values are aligned to the end of the block in no specific order.
 *
 * Locking strategy
 * ----------------
 * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count can change. Multiple writers to the same block are synchronized
 * by the buffer lock.
 */
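
/*
 * Illustrative sketch: the entry lists described above are walked with the
 * IS_LAST_ENTRY()/EXT4_XATTR_NEXT() iterators, a pattern used throughout
 * this file. The variable names below are hypothetical:
 *
 *	struct ext4_xattr_entry *entry;
 *
 *	for (entry = first; !IS_LAST_ENTRY(entry);
 *	     entry = EXT4_XATTR_NEXT(entry)) {
 *		// entry->e_name/e_name_len name the attribute;
 *		// e_value_offs (or e_value_inum) locates its value.
 *	}
 */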

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "acl.h"

#ifdef EXT4_XATTR_DEBUG
# define ea_idebug(inode, fmt, ...)					\
	printk(KERN_DEBUG "inode %s:%lu: " fmt "\n",			\
	       inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...)					\
	printk(KERN_DEBUG "block %pg:%lu: " fmt "\n",			\
	       bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
#else
# define ea_idebug(inode, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
#endif

static void ext4_xattr_block_cache_insert(struct mb_cache *,
					  struct buffer_head *);
static struct buffer_head *
ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *,
			    struct mb_cache_entry **);
static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
				    size_t value_count);
static void ext4_xattr_rehash(struct ext4_xattr_header *);

static const struct xattr_handler * const ext4_xattr_handler_map[] = {
	[EXT4_XATTR_INDEX_USER]		     = &ext4_xattr_user_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	[EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT4_XATTR_INDEX_TRUSTED]	     = &ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_SECURITY
	[EXT4_XATTR_INDEX_SECURITY]	     = &ext4_xattr_security_handler,
#endif
};

const struct xattr_handler *ext4_xattr_handlers[] = {
	&ext4_xattr_user_handler,
	&ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_EXT4_FS_SECURITY
	&ext4_xattr_security_handler,
#endif
	NULL
};

#define EA_BLOCK_CACHE(inode)	(((struct ext4_sb_info *) \
				inode->i_sb->s_fs_info)->s_ea_block_cache)

#define EA_INODE_CACHE(inode)	(((struct ext4_sb_info *) \
				inode->i_sb->s_fs_info)->s_ea_inode_cache)

static int
ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
			struct inode *inode);

#ifdef CONFIG_LOCKDEP
void ext4_xattr_inode_set_class(struct inode *ea_inode)
{
	lockdep_set_subclass(&ea_inode->i_rwsem, 1);
}
#endif

static __le32 ext4_xattr_block_csum(struct inode *inode,
				    sector_t block_nr,
				    struct ext4_xattr_header *hdr)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__le64 dsk_block_nr = cpu_to_le64(block_nr);
	__u32 dummy_csum = 0;
	int offset = offsetof(struct ext4_xattr_header, h_checksum);

	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
			   sizeof(dsk_block_nr));
	csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
	offset += sizeof(dummy_csum);
	csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
			   EXT4_BLOCK_SIZE(inode->i_sb) - offset);

	return cpu_to_le32(csum);
}

static int ext4_xattr_block_csum_verify(struct inode *inode,
					struct buffer_head *bh)
{
	struct ext4_xattr_header *hdr = BHDR(bh);
	int ret = 1;

	if (ext4_has_metadata_csum(inode->i_sb)) {
		lock_buffer(bh);
		ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
							bh->b_blocknr, hdr));
		unlock_buffer(bh);
	}
	return ret;
}

static void ext4_xattr_block_csum_set(struct inode *inode,
				      struct buffer_head *bh)
{
	if (ext4_has_metadata_csum(inode->i_sb))
		BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
						bh->b_blocknr, BHDR(bh));
}

static inline const struct xattr_handler *
ext4_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
		handler = ext4_xattr_handler_map[name_index];
	return handler;
}

static int
ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
			 void *value_start)
{
	struct ext4_xattr_entry *e = entry;

	/* Find the end of the names list */
	while (!IS_LAST_ENTRY(e)) {
		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
		if ((void *)next >= end)
			return -EFSCORRUPTED;
		e = next;
	}

	/* Check the values */
	while (!IS_LAST_ENTRY(entry)) {
		if (entry->e_value_size != 0 &&
		    entry->e_value_inum == 0) {
			u16 offs = le16_to_cpu(entry->e_value_offs);
			u32 size = le32_to_cpu(entry->e_value_size);
			void *value;

			/*
			 * The value cannot overlap the names, and the value
			 * with padding cannot extend beyond 'end'. Check both
			 * the padded and unpadded sizes, since the size may
			 * overflow to 0 when adding padding.
			 */
			if (offs > end - value_start)
				return -EFSCORRUPTED;
			value = value_start + offs;
			if (value < (void *)e + sizeof(u32) ||
			    size > end - value ||
			    EXT4_XATTR_SIZE(size) > end - value)
				return -EFSCORRUPTED;
		}
		entry = EXT4_XATTR_NEXT(entry);
	}

	return 0;
}

static inline int
ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
{
	int error;

	if (buffer_verified(bh))
		return 0;

	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1))
		return -EFSCORRUPTED;
	if (!ext4_xattr_block_csum_verify(inode, bh))
		return -EFSBADCRC;
	error = ext4_xattr_check_entries(BFIRST(bh), bh->b_data + bh->b_size,
					 bh->b_data);
	if (!error)
		set_buffer_verified(bh);
	return error;
}

static int
__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
		    void *end, const char *function, unsigned int line)
{
	int error = -EFSCORRUPTED;

	if (end - (void *)header < sizeof(*header) + sizeof(u32) ||
	    (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
		goto errout;
	error = ext4_xattr_check_entries(IFIRST(header), end, IFIRST(header));
errout:
	if (error)
		__ext4_error_inode(inode, function, line, 0,
				   "corrupted in-inode xattr");
	return error;
}

#define xattr_check_inode(inode, header, end) \
	__xattr_check_inode((inode), (header), (end), __func__, __LINE__)

static int
ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
		      const char *name, int sorted)
{
	struct ext4_xattr_entry *entry;
	size_t name_len;
	int cmp = 1;

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	entry = *pentry;
	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		cmp = name_index - entry->e_name_index;
		if (!cmp)
			cmp = name_len - entry->e_name_len;
		if (!cmp)
			cmp = memcmp(name, entry->e_name, name_len);
		if (cmp <= 0 && (sorted || cmp == 0))
			break;
	}
	*pentry = entry;
	return cmp ? -ENODATA : 0;
}

static u32
ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
{
	return ext4_chksum(sbi, sbi->s_csum_seed, buffer, size);
}

static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
{
	return ((u64)ea_inode->i_ctime.tv_sec << 32) |
	       ((u32)ea_inode->i_version);
}

static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
{
	ea_inode->i_ctime.tv_sec = (u32)(ref_count >> 32);
	ea_inode->i_version = (u32)ref_count;
}

static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode)
{
	return (u32)ea_inode->i_atime.tv_sec;
}

static void ext4_xattr_inode_set_hash(struct inode *ea_inode, u32 hash)
{
	ea_inode->i_atime.tv_sec = hash;
}
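
/*
 * Illustrative note on the helpers above: the 64-bit reference count of an
 * EA inode is packed into two otherwise-unused fields, the upper half in
 * i_ctime.tv_sec and the lower half in i_version, while the value hash
 * lives in i_atime.tv_sec. For example, a ref_count of 0x100000002 is
 * stored as i_ctime.tv_sec == 1 and i_version == 2:
 *
 *	u64 ref = ext4_xattr_inode_get_ref(ea_inode);	// reads both halves
 *	ext4_xattr_inode_set_ref(ea_inode, ref + 1);	// writes both halves
 */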

/*
 * Read the EA value from an inode.
 */
static int ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t size)
{
	unsigned long block = 0;
	struct buffer_head *bh;
	int blocksize = ea_inode->i_sb->s_blocksize;
	size_t csize, copied = 0;
	void *copy_pos = buf;

	while (copied < size) {
		csize = (size - copied) > blocksize ? blocksize : size - copied;
		bh = ext4_bread(NULL, ea_inode, block, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)
			return -EFSCORRUPTED;
		memcpy(copy_pos, bh->b_data, csize);
		brelse(bh);

		copy_pos += csize;
		block += 1;
		copied += csize;
	}
	return 0;
}

static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
				 struct inode **ea_inode)
{
	struct inode *inode;
	int err;

	inode = ext4_iget(parent->i_sb, ea_ino);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ext4_error(parent->i_sb,
			   "error while reading EA inode %lu err=%d", ea_ino,
			   err);
		return err;
	}

	if (is_bad_inode(inode)) {
		ext4_error(parent->i_sb,
			   "error while reading EA inode %lu is_bad_inode",
			   ea_ino);
		err = -EIO;
		goto error;
	}

	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		ext4_error(parent->i_sb,
			   "EA inode %lu does not have EXT4_EA_INODE_FL flag",
			   ea_ino);
		err = -EINVAL;
		goto error;
	}

	*ea_inode = inode;
	return 0;
error:
	iput(inode);
	return err;
}

static int
ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
			       struct ext4_xattr_entry *entry, void *buffer,
			       size_t size)
{
	u32 hash;

	/* Verify stored hash matches calculated hash. */
	hash = ext4_xattr_inode_hash(EXT4_SB(ea_inode->i_sb), buffer, size);
	if (hash != ext4_xattr_inode_get_hash(ea_inode))
		return -EFSCORRUPTED;

	if (entry) {
		__le32 e_hash, tmp_data;

		/* Verify entry hash. */
		tmp_data = cpu_to_le32(hash);
		e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len,
					       &tmp_data, 1);
		if (e_hash != entry->e_hash)
			return -EFSCORRUPTED;
	}
	return 0;
}

#define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode)->i_mtime.tv_sec)

/*
 * Read xattr value from the EA inode.
 */
static int
ext4_xattr_inode_get(struct inode *inode, struct ext4_xattr_entry *entry,
		     void *buffer, size_t size)
{
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
	struct inode *ea_inode;
	int err;

	err = ext4_xattr_inode_iget(inode, le32_to_cpu(entry->e_value_inum),
				    &ea_inode);
	if (err) {
		ea_inode = NULL;
		goto out;
	}

	if (i_size_read(ea_inode) != size) {
		ext4_warning_inode(ea_inode,
				   "ea_inode file size=%llu entry size=%zu",
				   i_size_read(ea_inode), size);
		err = -EFSCORRUPTED;
		goto out;
	}

	err = ext4_xattr_inode_read(ea_inode, buffer, size);
	if (err)
		goto out;

	err = ext4_xattr_inode_verify_hashes(ea_inode, entry, buffer, size);
	/*
	 * Compatibility check for old Lustre ea_inode implementation. Old
	 * version does not have hash validation, but it has a backpointer
	 * from ea_inode to the parent inode.
	 */
	if (err == -EFSCORRUPTED) {
		if (EXT4_XATTR_INODE_GET_PARENT(ea_inode) != inode->i_ino ||
		    ea_inode->i_generation != inode->i_generation) {
			ext4_warning_inode(ea_inode,
					   "EA inode hash validation failed");
			goto out;
		}
		/* Do not add ea_inode to the cache. */
		ea_inode_cache = NULL;
	} else if (err)
		goto out;

	if (ea_inode_cache)
		mb_cache_entry_create(ea_inode_cache, GFP_NOFS,
				      ext4_xattr_inode_get_hash(ea_inode),
				      ea_inode->i_ino, true /* reusable */);
out:
	iput(ea_inode);
	return err;
}

static int
ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext4_xattr_entry *entry;
	size_t size;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	error = -ENODATA;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(inode, bh)) {
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		error = -EFSCORRUPTED;
		goto cleanup;
	}
	ext4_xattr_block_cache_insert(ea_block_cache, bh);
	entry = BFIRST(bh);
	error = ext4_xattr_find_entry(&entry, name_index, name, 1);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		if (entry->e_value_inum) {
			error = ext4_xattr_inode_get(inode, entry, buffer,
						     size);
			if (error)
				goto cleanup;
		} else {
			memcpy(buffer, bh->b_data +
			       le16_to_cpu(entry->e_value_offs), size);
		}
	}
	error = size;

cleanup:
	brelse(bh);
	return error;
}

int
ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	size_t size;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return -ENODATA;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = xattr_check_inode(inode, header, end);
	if (error)
		goto cleanup;
	entry = IFIRST(header);
	error = ext4_xattr_find_entry(&entry, name_index, name, 0);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		if (entry->e_value_inum) {
			error = ext4_xattr_inode_get(inode, entry, buffer,
						     size);
			if (error)
				goto cleanup;
		} else {
			memcpy(buffer, (void *)IFIRST(header) +
			       le16_to_cpu(entry->e_value_offs), size);
		}
	}
	error = size;

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * ext4_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext4_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	int error;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (strlen(name) > 255)
		return -ERANGE;

	down_read(&EXT4_I(inode)->xattr_sem);
	error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
				     buffer_size);
	if (error == -ENODATA)
		error = ext4_xattr_block_get(inode, name_index, name, buffer,
					     buffer_size);
	up_read(&EXT4_I(inode)->xattr_sem);
	return error;
}
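
/*
 * Usage sketch for ext4_xattr_get(): per the comment above, callers may pass
 * a NULL buffer to learn the required size, then call again with a buffer of
 * that size. "myattr" and the elided error handling are illustrative only:
 *
 *	int len = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER,
 *				 "myattr", NULL, 0);
 *	if (len > 0) {
 *		void *val = kmalloc(len, GFP_NOFS);
 *		len = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER,
 *				     "myattr", val, len);
 *	}
 */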

static int
ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
			char *buffer, size_t buffer_size)
{
	size_t rest = buffer_size;

	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext4_xattr_handler(entry->e_name_index);

		if (handler && (!handler->list || handler->list(dentry))) {
			const char *prefix = handler->prefix ?: handler->name;
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest)
					return -ERANGE;
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	return buffer_size - rest; /* total size */
}

static int
ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	int error;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	error = 0;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(inode, bh)) {
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		error = -EFSCORRUPTED;
		goto cleanup;
	}
	ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);

cleanup:
	brelse(bh);
	return error;
}

static int
ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return 0;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = xattr_check_inode(inode, header, end);
	if (error)
		goto cleanup;
	error = ext4_xattr_list_entries(dentry, IFIRST(header),
					buffer, buffer_size);

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_rwsem: don't care
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
ssize_t
ext4_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	int ret, ret2;

	down_read(&EXT4_I(d_inode(dentry))->xattr_sem);
	ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	if (buffer) {
		buffer += ret;
		buffer_size -= ret;
	}
	ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	ret += ret2;
errout:
	up_read(&EXT4_I(d_inode(dentry))->xattr_sem);
	return ret;
}

/*
 * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext4_xattr_update_super_block(handle_t *handle,
					  struct super_block *sb)
{
	if (ext4_has_feature_xattr(sb))
		return;

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
		ext4_set_feature_xattr(sb);
		ext4_handle_dirty_super(handle, sb);
	}
}

int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
{
	struct ext4_iloc iloc = { .bh = NULL };
	struct buffer_head *bh = NULL;
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	qsize_t ea_inode_refs = 0;
	void *end;
	int ret;

	lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem);

	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		ret = ext4_get_inode_loc(inode, &iloc);
		if (ret)
			goto out;
		raw_inode = ext4_raw_inode(&iloc);
		header = IHDR(inode, raw_inode);
		end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
		ret = xattr_check_inode(inode, header, end);
		if (ret)
			goto out;

		for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
		     entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				ea_inode_refs++;
	}

	if (EXT4_I(inode)->i_file_acl) {
		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
		if (!bh) {
			ret = -EIO;
			goto out;
		}

		if (ext4_xattr_check_block(inode, bh)) {
			ret = -EFSCORRUPTED;
			goto out;
		}

		for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
		     entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				ea_inode_refs++;
	}
	*usage = ea_inode_refs + 1;
	ret = 0;
out:
	brelse(iloc.bh);
	brelse(bh);
	return ret;
}

static inline size_t round_up_cluster(struct inode *inode, size_t length)
{
	struct super_block *sb = inode->i_sb;
	size_t cluster_size = 1 << (EXT4_SB(sb)->s_cluster_bits +
				    inode->i_blkbits);
	size_t mask = ~(cluster_size - 1);

	return (length + cluster_size - 1) & mask;
}
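
/*
 * Worked example: with 4 KiB blocks and no bigalloc (s_cluster_bits == 0),
 * cluster_size is 4096, so round_up_cluster(inode, 5000) returns
 * (5000 + 4095) & ~4095 == 8192.
 */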

static int ext4_xattr_inode_alloc_quota(struct inode *inode, size_t len)
{
	int err;

	err = dquot_alloc_inode(inode);
	if (err)
		return err;
	err = dquot_alloc_space_nodirty(inode, round_up_cluster(inode, len));
	if (err)
		dquot_free_inode(inode);
	return err;
}

static void ext4_xattr_inode_free_quota(struct inode *inode, size_t len)
{
	dquot_free_space_nodirty(inode, round_up_cluster(inode, len));
	dquot_free_inode(inode);
}

int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
			     struct buffer_head *block_bh, size_t value_len,
			     bool is_create)
{
	int credits;
	int blocks;

	/*
	 * 1) Owner inode update
	 * 2) Ref count update on old xattr block
	 * 3) new xattr block
	 * 4) block bitmap update for new xattr block
	 * 5) group descriptor for new xattr block
	 * 6) block bitmap update for old xattr block
	 * 7) group descriptor for old block
	 *
	 * 6 & 7 can happen if we have two racing threads T_a and T_b
	 * which are each trying to set an xattr on inodes I_a and I_b
	 * which were both initially sharing an xattr block.
	 */
	credits = 7;

	/* Quota updates. */
	credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(sb);

	/*
	 * In case of inline data, we may push out the data to a block,
	 * so we need to reserve credits for this eventuality
	 */
	if (inode && ext4_has_inline_data(inode))
		credits += ext4_writepage_trans_blocks(inode) + 1;

	/* We are done if ea_inode feature is not enabled. */
	if (!ext4_has_feature_ea_inode(sb))
		return credits;

	/* New ea_inode, inode map, block bitmap, group descriptor. */
	credits += 4;

	/* Data blocks. */
	blocks = (value_len + sb->s_blocksize - 1) >> sb->s_blocksize_bits;

	/* Indirection block or one level of extent tree. */
	blocks += 1;

	/* Block bitmap and group descriptor updates for each block. */
	credits += blocks * 2;

	/* Blocks themselves. */
	credits += blocks;

	if (!is_create) {
		/* Dereference ea_inode holding old xattr value.
		 * Old ea_inode, inode map, block bitmap, group descriptor.
		 */
		credits += 4;

		/* Data blocks for old ea_inode. */
		blocks = XATTR_SIZE_MAX >> sb->s_blocksize_bits;

		/* Indirection block or one level of extent tree for old
		 * ea_inode.
		 */
		blocks += 1;

		/* Block bitmap and group descriptor updates for each block. */
		credits += blocks * 2;
	}

	/* We may need to clone the existing xattr block in which case we need
	 * to increment ref counts for existing ea_inodes referenced by it.
	 */
	if (block_bh) {
		struct ext4_xattr_entry *entry = BFIRST(block_bh);

		for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				/* Ref count update on ea_inode. */
				credits += 1;
	}
	return credits;
}
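
/*
 * Worked example, following the arithmetic above: creating a 10000-byte
 * value on a 4 KiB-block filesystem with ea_inode enabled gives
 * blocks = ceil(10000/4096) + 1 = 4, so the ea_inode path adds
 * 4 (new inode metadata) + 4 * 2 (bitmaps/descriptors) + 4 (the blocks
 * themselves) = 16 credits on top of the base 7 and the quota term.
 */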

static int ext4_xattr_ensure_credits(handle_t *handle, struct inode *inode,
				     int credits, struct buffer_head *bh,
				     bool dirty, bool block_csum)
{
	int error;

	if (!ext4_handle_valid(handle))
		return 0;

	if (handle->h_buffer_credits >= credits)
		return 0;

	error = ext4_journal_extend(handle, credits - handle->h_buffer_credits);
	if (!error)
		return 0;
	if (error < 0) {
		ext4_warning(inode->i_sb, "Extend journal (error %d)", error);
		return error;
	}

	if (bh && dirty) {
		if (block_csum)
			ext4_xattr_block_csum_set(inode, bh);
		error = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (error) {
			ext4_warning(inode->i_sb, "Handle metadata (error %d)",
				     error);
			return error;
		}
	}

	error = ext4_journal_restart(handle, credits);
	if (error) {
		ext4_warning(inode->i_sb, "Restart journal (error %d)", error);
		return error;
	}

	if (bh) {
		error = ext4_journal_get_write_access(handle, bh);
		if (error) {
			ext4_warning(inode->i_sb,
				     "Get write access failed (error %d)",
				     error);
			return error;
		}
	}
	return 0;
}

static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
				       int ref_change)
{
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode);
	struct ext4_iloc iloc;
	s64 ref_count;
	u32 hash;
	int ret;

	inode_lock(ea_inode);

	ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
	if (ret) {
		iloc.bh = NULL;
		goto out;
	}

	ref_count = ext4_xattr_inode_get_ref(ea_inode);
	ref_count += ref_change;
	ext4_xattr_inode_set_ref(ea_inode, ref_count);

	if (ref_change > 0) {
		WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld",
			  ea_inode->i_ino, ref_count);

		if (ref_count == 1) {
			WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u",
				  ea_inode->i_ino, ea_inode->i_nlink);

			set_nlink(ea_inode, 1);
			ext4_orphan_del(handle, ea_inode);

			if (ea_inode_cache) {
				hash = ext4_xattr_inode_get_hash(ea_inode);
				mb_cache_entry_create(ea_inode_cache,
						      GFP_NOFS, hash,
						      ea_inode->i_ino,
						      true /* reusable */);
			}
		}
	} else {
		WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
			  ea_inode->i_ino, ref_count);

		if (ref_count == 0) {
			WARN_ONCE(ea_inode->i_nlink != 1,
				  "EA inode %lu i_nlink=%u",
				  ea_inode->i_ino, ea_inode->i_nlink);

			clear_nlink(ea_inode);
			ext4_orphan_add(handle, ea_inode);

			if (ea_inode_cache) {
				hash = ext4_xattr_inode_get_hash(ea_inode);
				mb_cache_entry_delete(ea_inode_cache, hash,
						      ea_inode->i_ino);
			}
		}
	}

	ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc);
	iloc.bh = NULL;
	if (ret)
		ext4_warning_inode(ea_inode,
				   "ext4_mark_iloc_dirty() failed ret=%d", ret);
out:
	brelse(iloc.bh);
	inode_unlock(ea_inode);
	return ret;
}

static int ext4_xattr_inode_inc_ref(handle_t *handle, struct inode *ea_inode)
{
	return ext4_xattr_inode_update_ref(handle, ea_inode, 1);
}

static int ext4_xattr_inode_dec_ref(handle_t *handle, struct inode *ea_inode)
{
	return ext4_xattr_inode_update_ref(handle, ea_inode, -1);
}

static int ext4_xattr_inode_inc_ref_all(handle_t *handle, struct inode *parent,
					struct ext4_xattr_entry *first)
{
	struct inode *ea_inode;
	struct ext4_xattr_entry *entry;
	struct ext4_xattr_entry *failed_entry;
	unsigned int ea_ino;
	int err, saved_err;

	for (entry = first; !IS_LAST_ENTRY(entry);
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino, &ea_inode);
		if (err)
			goto cleanup;
		err = ext4_xattr_inode_inc_ref(handle, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode, "inc ref error %d", err);
			iput(ea_inode);
			goto cleanup;
		}
		iput(ea_inode);
	}
	return 0;

cleanup:
	saved_err = err;
	failed_entry = entry;

	for (entry = first; entry != failed_entry;
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino, &ea_inode);
		if (err) {
			ext4_warning(parent->i_sb,
				     "cleanup ea_ino %u iget error %d", ea_ino,
				     err);
			continue;
		}
		err = ext4_xattr_inode_dec_ref(handle, ea_inode);
		if (err)
			ext4_warning_inode(ea_inode, "cleanup dec ref error %d",
					   err);
		iput(ea_inode);
	}
	return saved_err;
}

static void
ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
			     struct buffer_head *bh,
			     struct ext4_xattr_entry *first, bool block_csum,
			     struct ext4_xattr_inode_array **ea_inode_array,
			     int extra_credits, bool skip_quota)
{
	struct inode *ea_inode;
	struct ext4_xattr_entry *entry;
	bool dirty = false;
	unsigned int ea_ino;
	int err;
	int credits;

	/* One credit for dec ref on ea_inode, one for orphan list addition, */
	credits = 2 + extra_credits;

	for (entry = first; !IS_LAST_ENTRY(entry);
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino, &ea_inode);
		if (err)
			continue;

		err = ext4_expand_inode_array(ea_inode_array, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode,
					   "Expand inode array err=%d", err);
			iput(ea_inode);
			continue;
		}

		err = ext4_xattr_ensure_credits(handle, parent, credits, bh,
						dirty, block_csum);
		if (err) {
			ext4_warning_inode(ea_inode, "Ensure credits err=%d",
					   err);
			continue;
		}

		err = ext4_xattr_inode_dec_ref(handle, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode, "ea_inode dec ref err=%d",
					   err);
			continue;
		}

		if (!skip_quota)
			ext4_xattr_inode_free_quota(parent,
					      le32_to_cpu(entry->e_value_size));

		/*
		 * Forget about ea_inode within the same transaction that
		 * decrements the ref count. This avoids duplicate decrements in
		 * case the rest of the work spills over to subsequent
		 * transactions.
		 */
		entry->e_value_inum = 0;
		entry->e_value_size = 0;

		dirty = true;
	}

	if (dirty) {
		/*
		 * Note that we are deliberately skipping csum calculation for
		 * the final update because we do not expect any journal
		 * restarts until xattr block is freed.
		 */
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (err)
			ext4_warning_inode(parent,
					   "handle dirty metadata err=%d", err);
	}
}

/*
 * Release the xattr block BH: If the reference count is > 1, decrement it;
 * otherwise free the block.
 */
static void
ext4_xattr_release_block(handle_t *handle, struct inode *inode,
			 struct buffer_head *bh,
			 struct ext4_xattr_inode_array **ea_inode_array,
			 int extra_credits)
{
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
	u32 hash, ref;
	int error = 0;

	BUFFER_TRACE(bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, bh);
	if (error)
		goto out;

	lock_buffer(bh);
	hash = le32_to_cpu(BHDR(bh)->h_hash);
	ref = le32_to_cpu(BHDR(bh)->h_refcount);
	if (ref == 1) {
		ea_bdebug(bh, "refcount now=0; freeing");
		/*
		 * This must happen under buffer lock for
		 * ext4_xattr_block_set() to reliably detect freed block
		 */
		if (ea_block_cache)
			mb_cache_entry_delete(ea_block_cache, hash,
					      bh->b_blocknr);
		get_bh(bh);
		unlock_buffer(bh);

		if (ext4_has_feature_ea_inode(inode->i_sb))
			ext4_xattr_inode_dec_ref_all(handle, inode, bh,
						     BFIRST(bh),
						     true /* block_csum */,
						     ea_inode_array,
						     extra_credits,
						     true /* skip_quota */);
		ext4_free_blocks(handle, inode, bh, 0, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	} else {
		ref--;
		BHDR(bh)->h_refcount = cpu_to_le32(ref);
		if (ref == EXT4_XATTR_REFCOUNT_MAX - 1) {
			struct mb_cache_entry *ce;

			if (ea_block_cache) {
				ce = mb_cache_entry_get(ea_block_cache, hash,
							bh->b_blocknr);
				if (ce) {
					ce->e_reusable = 1;
					mb_cache_entry_put(ea_block_cache, ce);
				}
			}
		}

		ext4_xattr_block_csum_set(inode, bh);
		/*
		 * Beware of this ugliness: Releasing of xattr block references
		 * from different inodes can race and so we have to protect
		 * from a race where someone else frees the block (and releases
		 * its journal_head) before we are done dirtying the buffer. In
		 * nojournal mode this race is harmless and we actually cannot
		 * call ext4_handle_dirty_metadata() with locked buffer as
		 * that function can call sync_dirty_buffer() so for that case
		 * we handle the dirtying after unlocking the buffer.
		 */
		if (ext4_handle_valid(handle))
			error = ext4_handle_dirty_metadata(handle, inode, bh);
		unlock_buffer(bh);
		if (!ext4_handle_valid(handle))
			error = ext4_handle_dirty_metadata(handle, inode, bh);
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
		dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
		ea_bdebug(bh, "refcount now=%d; releasing",
			  le32_to_cpu(BHDR(bh)->h_refcount));
	}
out:
	ext4_std_error(inode->i_sb, error);
	return;
}

/*
 * Find the available free space for EAs. This also returns the total number of
 * bytes used by EA entries.
 */
static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
				    size_t *min_offs, void *base, int *total)
{
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_inum && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < *min_offs)
				*min_offs = offs;
		}
		if (total)
			*total += EXT4_XATTR_LEN(last->e_name_len);
	}
	return (*min_offs - ((void *)last - base) - sizeof(__u32));
}
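
/*
 * Worked example: the free space is the gap between the end of the entry
 * descriptors and the lowest value offset, minus the 4-byte terminator.
 * If the descriptors end 100 bytes past 'base' and the lowest value starts
 * at offset 3000, the function returns 3000 - 100 - 4 = 2896.
 */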

/*
 * Write the value of the EA in an inode.
 */
static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
				  const void *buf, int bufsize)
{
	struct buffer_head *bh = NULL;
	unsigned long block = 0;
	int blocksize = ea_inode->i_sb->s_blocksize;
	int max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
	int csize, wsize = 0;
	int ret = 0;
	int retries = 0;

retry:
	while (ret >= 0 && ret < max_blocks) {
		struct ext4_map_blocks map;
		map.m_lblk = block += ret;
		map.m_len = max_blocks -= ret;

		ret = ext4_map_blocks(handle, ea_inode, &map,
				      EXT4_GET_BLOCKS_CREATE);
		if (ret <= 0) {
			ext4_mark_inode_dirty(handle, ea_inode);
			if (ret == -ENOSPC &&
			    ext4_should_retry_alloc(ea_inode->i_sb, &retries)) {
				ret = 0;
				goto retry;
			}
			break;
		}
	}

	if (ret < 0)
		return ret;

	block = 0;
	while (wsize < bufsize) {
		if (bh != NULL)
			brelse(bh);
		csize = (bufsize - wsize) > blocksize ? blocksize :
							bufsize - wsize;
		bh = ext4_getblk(handle, ea_inode, block, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		ret = ext4_journal_get_write_access(handle, bh);
		if (ret)
			goto out;

		memcpy(bh->b_data, buf, csize);
		set_buffer_uptodate(bh);
		ext4_handle_dirty_metadata(handle, ea_inode, bh);

		buf += csize;
		wsize += csize;
		block += 1;
	}

	inode_lock(ea_inode);
	i_size_write(ea_inode, wsize);
	ext4_update_i_disksize(ea_inode, wsize);
	inode_unlock(ea_inode);

	ext4_mark_inode_dirty(handle, ea_inode);

out:
	brelse(bh);
	return ret;
}

/*
 * Create an inode to store the value of a large EA.
 */
static struct inode *ext4_xattr_inode_create(handle_t *handle,
					     struct inode *inode, u32 hash)
{
	struct inode *ea_inode = NULL;
	uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) };
	int err;

	/*
	 * Let the next inode be the goal, so we try and allocate the EA inode
	 * in the same group, or nearby one.
	 */
	ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
				  S_IFREG | 0600, NULL, inode->i_ino + 1, owner,
				  EXT4_EA_INODE_FL);
	if (!IS_ERR(ea_inode)) {
		ea_inode->i_op = &ext4_file_inode_operations;
		ea_inode->i_fop = &ext4_file_operations;
		ext4_set_aops(ea_inode);
		ext4_xattr_inode_set_class(ea_inode);
		unlock_new_inode(ea_inode);
		ext4_xattr_inode_set_ref(ea_inode, 1);
		ext4_xattr_inode_set_hash(ea_inode, hash);
		err = ext4_mark_inode_dirty(handle, ea_inode);
		if (!err)
			err = ext4_inode_attach_jinode(ea_inode);
		if (err) {
			iput(ea_inode);
			return ERR_PTR(err);
		}

		/*
		 * Xattr inodes are shared therefore quota charging is performed
		 * at a higher level.
		 */
		dquot_free_inode(ea_inode);
		dquot_drop(ea_inode);
		inode_lock(ea_inode);
		ea_inode->i_flags |= S_NOQUOTA;
		inode_unlock(ea_inode);
	}

	return ea_inode;
}

static struct inode *
ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
			    size_t value_len, u32 hash)
{
	struct inode *ea_inode;
	struct mb_cache_entry *ce;
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
	void *ea_data;

	if (!ea_inode_cache)
		return NULL;

	ce = mb_cache_entry_find_first(ea_inode_cache, hash);
	if (!ce)
		return NULL;

	ea_data = ext4_kvmalloc(value_len, GFP_NOFS);
	if (!ea_data) {
		mb_cache_entry_put(ea_inode_cache, ce);
		return NULL;
	}

	while (ce) {
		ea_inode = ext4_iget(inode->i_sb, ce->e_value);
		if (!IS_ERR(ea_inode) &&
		    !is_bad_inode(ea_inode) &&
		    (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
		    i_size_read(ea_inode) == value_len &&
		    !ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
		    !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
						    value_len) &&
		    !memcmp(value, ea_data, value_len)) {
			mb_cache_entry_touch(ea_inode_cache, ce);
			mb_cache_entry_put(ea_inode_cache, ce);
			kvfree(ea_data);
			return ea_inode;
		}

		if (!IS_ERR(ea_inode))
			iput(ea_inode);
		ce = mb_cache_entry_find_next(ea_inode_cache, ce);
	}
	kvfree(ea_data);
	return NULL;
}

/*
 * Add value of the EA in an inode.
 */
static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
					  const void *value, size_t value_len,
					  struct inode **ret_inode)
{
	struct inode *ea_inode;
	u32 hash;
	int err;

	hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len);
	ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash);
	if (ea_inode) {
		err = ext4_xattr_inode_inc_ref(handle, ea_inode);
		if (err) {
			iput(ea_inode);
			return err;
		}

		*ret_inode = ea_inode;
		return 0;
	}

	/* Create an inode for the EA value */
	ea_inode = ext4_xattr_inode_create(handle, inode, hash);
	if (IS_ERR(ea_inode))
		return PTR_ERR(ea_inode);

	err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
	if (err) {
		ext4_xattr_inode_dec_ref(handle, ea_inode);
		iput(ea_inode);
		return err;
	}

	if (EA_INODE_CACHE(inode))
		mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash,
				      ea_inode->i_ino, true /* reusable */);

	*ret_inode = ea_inode;
	return 0;
}

/*
 * Reserve min(block_size/8, 1024) bytes for xattr entries/names if ea_inode
 * feature is enabled.
 */
#define EXT4_XATTR_BLOCK_RESERVE(inode)	min(i_blocksize(inode)/8, 1024U)
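
/*
 * Worked example: with 4 KiB blocks the reserve is min(4096/8, 1024) = 512
 * bytes; the 1024-byte cap only takes effect at block sizes of 8 KiB and up.
 */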
static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
				struct ext4_xattr_search *s,
				handle_t *handle, struct inode *inode,
				bool is_block)
{
	struct ext4_xattr_entry *last;
	struct ext4_xattr_entry *here = s->here;
	size_t min_offs = s->end - s->base, name_len = strlen(i->name);
	int in_inode = i->in_inode;
	struct inode *old_ea_inode = NULL;
	struct inode *new_ea_inode = NULL;
	size_t old_size, new_size;
	int ret;

	/* Space used by old and new values. */
	old_size = (!s->not_found && !here->e_value_inum) ?
			EXT4_XATTR_SIZE(le32_to_cpu(here->e_value_size)) : 0;
	new_size = (i->value && !in_inode) ? EXT4_XATTR_SIZE(i->value_len) : 0;

	/*
	 * Optimization for the simple case when old and new values have the
	 * same padded sizes. Not applicable if external inodes are involved.
	 */
	if (new_size && new_size == old_size) {
		size_t offs = le16_to_cpu(here->e_value_offs);
		void *val = s->base + offs;

		here->e_value_size = cpu_to_le32(i->value_len);
		if (i->value == EXT4_ZERO_XATTR_VALUE) {
			memset(val, 0, new_size);
		} else {
			memcpy(val, i->value, i->value_len);
			/* Clear padding bytes. */
			memset(val + i->value_len, 0,
			       new_size - i->value_len);
		}
		return 0;
	}

	/* Compute min_offs and last. */
	last = s->first;
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_inum && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < min_offs)
				min_offs = offs;
		}
	}

	/* Check whether we have enough space. */
	if (i->value) {
		size_t free;

		free = min_offs - ((void *)last - s->base) - sizeof(__u32);
		if (!s->not_found)
			free += EXT4_XATTR_LEN(name_len) + old_size;

		if (free < EXT4_XATTR_LEN(name_len) + new_size) {
			ret = -ENOSPC;
			goto out;
		}

		/*
		 * If storing the value in an external inode is an option,
		 * reserve space for xattr entries/names in the external
		 * attribute block so that a long value does not occupy the
		 * whole space and prevent further entries from being added.
		 */
		if (ext4_has_feature_ea_inode(inode->i_sb) &&
		    new_size && is_block &&
		    (min_offs + old_size - new_size) <
					EXT4_XATTR_BLOCK_RESERVE(inode)) {
			ret = -ENOSPC;
			goto out;
		}
	}

	/*
	 * Getting access to old and new ea inodes is subject to failures.
	 * Finish that work before doing any modifications to the xattr data.
	 */
	if (!s->not_found && here->e_value_inum) {
		ret = ext4_xattr_inode_iget(inode,
					    le32_to_cpu(here->e_value_inum),
					    &old_ea_inode);
		if (ret) {
			old_ea_inode = NULL;
			goto out;
		}
	}
	if (i->value && in_inode) {
		WARN_ON_ONCE(!i->value_len);

		ret = ext4_xattr_inode_alloc_quota(inode, i->value_len);
		if (ret)
			goto out;

		ret = ext4_xattr_inode_lookup_create(handle, inode, i->value,
						     i->value_len,
						     &new_ea_inode);
		if (ret) {
			new_ea_inode = NULL;
			ext4_xattr_inode_free_quota(inode, i->value_len);
			goto out;
		}
	}

	if (old_ea_inode) {
		/* We are ready to release ref count on the old_ea_inode. */
		ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
		if (ret) {
			/* Release the newly acquired ref count on new_ea_inode. */
			if (new_ea_inode) {
				int err;

				err = ext4_xattr_inode_dec_ref(handle,
							       new_ea_inode);
				if (err)
					ext4_warning_inode(new_ea_inode,
						  "dec ref new_ea_inode err=%d",
						  err);
				ext4_xattr_inode_free_quota(inode,
							    i->value_len);
			}
			goto out;
		}

		ext4_xattr_inode_free_quota(inode,
					    le32_to_cpu(here->e_value_size));
	}

	/* No failures allowed past this point. */

	if (!s->not_found && here->e_value_offs) {
		/* Remove the old value. */
		void *first_val = s->base + min_offs;
		size_t offs = le16_to_cpu(here->e_value_offs);
		void *val = s->base + offs;

		memmove(first_val + old_size, first_val, val - first_val);
		memset(first_val, 0, old_size);
		min_offs += old_size;

		/* Adjust all value offsets. */
		last = s->first;
		while (!IS_LAST_ENTRY(last)) {
			size_t o = le16_to_cpu(last->e_value_offs);

			if (!last->e_value_inum &&
			    last->e_value_size && o < offs)
				last->e_value_offs = cpu_to_le16(o + old_size);
			last = EXT4_XATTR_NEXT(last);
		}
	}

	if (!i->value) {
		/* Remove old name. */
		size_t size = EXT4_XATTR_LEN(name_len);

		last = ENTRY((void *)last - size);
		memmove(here, (void *)here + size,
			(void *)last - (void *)here + sizeof(__u32));
		memset(last, 0, size);
	} else if (s->not_found) {
		/* Insert new name. */
		size_t size = EXT4_XATTR_LEN(name_len);
		size_t rest = (void *)last - (void *)here + sizeof(__u32);

		memmove((void *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = i->name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, i->name, name_len);
	} else {
		/* This is an update, reset value info. */
		here->e_value_inum = 0;
		here->e_value_offs = 0;
		here->e_value_size = 0;
	}

	if (i->value) {
		/* Insert new value. */
		if (in_inode) {
			here->e_value_inum = cpu_to_le32(new_ea_inode->i_ino);
		} else if (i->value_len) {
			void *val = s->base + min_offs - new_size;

			here->e_value_offs = cpu_to_le16(min_offs - new_size);
			if (i->value == EXT4_ZERO_XATTR_VALUE) {
				memset(val, 0, new_size);
			} else {
				memcpy(val, i->value, i->value_len);
				/* Clear padding bytes. */
				memset(val + i->value_len, 0,
				       new_size - i->value_len);
			}
		}
		here->e_value_size = cpu_to_le32(i->value_len);
	}

	if (i->value) {
		__le32 hash = 0;

		/* Entry hash calculation. */
		if (in_inode) {
			__le32 crc32c_hash;

			/*
			 * Feed crc32c hash instead of the raw value for entry
			 * hash calculation. This is to avoid walking
			 * potentially long value buffer again.
			 */
			crc32c_hash = cpu_to_le32(
				       ext4_xattr_inode_get_hash(new_ea_inode));
			hash = ext4_xattr_hash_entry(here->e_name,
						     here->e_name_len,
						     &crc32c_hash, 1);
		} else if (is_block) {
			__le32 *value = s->base + min_offs - new_size;

			hash = ext4_xattr_hash_entry(here->e_name,
						     here->e_name_len, value,
						     new_size >> 2);
		}
		here->e_hash = hash;
	}

	if (is_block)
		ext4_xattr_rehash((struct ext4_xattr_header *)s->base);

	ret = 0;
out:
	iput(old_ea_inode);
	iput(new_ea_inode);
	return ret;
}
struct ext4_xattr_block_find {
	struct ext4_xattr_search s;
	struct buffer_head *bh;
};
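/*
 * Read and validate the inode's external xattr block (if any) and
 * position @bs->s.here at the named attribute. @bs->s.not_found is set
 * to -ENODATA when the attribute is absent.
 */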
static int
ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
		      struct ext4_xattr_block_find *bs)
{
	struct super_block *sb = inode->i_sb;
	int error;

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  i->name_index, i->name, i->value, (long)i->value_len);

	if (EXT4_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
		error = -EIO;
		if (!bs->bh)
			goto cleanup;
		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
			  atomic_read(&(bs->bh->b_count)),
			  le32_to_cpu(BHDR(bs->bh)->h_refcount));
		if (ext4_xattr_check_block(inode, bs->bh)) {
			EXT4_ERROR_INODE(inode, "bad block %llu",
					 EXT4_I(inode)->i_file_acl);
			error = -EFSCORRUPTED;
			goto cleanup;
		}
		/* Find the named attribute. */
		bs->s.base = BHDR(bs->bh);
		bs->s.first = BFIRST(bs->bh);
		bs->s.end = bs->bh->b_data + bs->bh->b_size;
		bs->s.here = bs->s.first;
		error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
					      i->name, 1);
		if (error && error != -ENODATA)
			goto cleanup;
		bs->s.not_found = error;
	}
	error = 0;

cleanup:
	return error;
}
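/*
 * Apply the change described by @i to the external xattr block. The
 * block is modified in place when this inode holds the only reference;
 * otherwise it is un-shared by cloning it into a private copy first.
 * Identical blocks are de-duplicated through the mbcache.
 */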
static int
ext4_xattr_block_set(handle_t *handle, struct inode *inode,
		     struct ext4_xattr_info *i,
		     struct ext4_xattr_block_find *bs)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	struct ext4_xattr_search s_copy = bs->s;
	struct ext4_xattr_search *s = &s_copy;
	struct mb_cache_entry *ce = NULL;
	int error = 0;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
	struct inode *ea_inode = NULL;
	size_t old_ea_inode_size = 0;

#define header(x) ((struct ext4_xattr_header *)(x))

	if (s->base) {
		BUFFER_TRACE(bs->bh, "get_write_access");
		error = ext4_journal_get_write_access(handle, bs->bh);
		if (error)
			goto cleanup;
		lock_buffer(bs->bh);

		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash);

			/*
			 * This must happen under buffer lock for
			 * ext4_xattr_block_set() to reliably detect modified
			 * block
			 */
			if (ea_block_cache)
				mb_cache_entry_delete(ea_block_cache, hash,
						      bs->bh->b_blocknr);
			ea_bdebug(bs->bh, "modifying in-place");
			error = ext4_xattr_set_entry(i, s, handle, inode,
						     true /* is_block */);
			if (!error)
				ext4_xattr_block_cache_insert(ea_block_cache,
							      bs->bh);
			ext4_xattr_block_csum_set(inode, bs->bh);
			unlock_buffer(bs->bh);
			if (error == -EFSCORRUPTED)
				goto bad_block;
			if (!error)
				error = ext4_handle_dirty_metadata(handle,
								   inode,
								   bs->bh);
			if (error)
				goto cleanup;
			goto inserted;
		} else {
			int offset = (char *)s->here - bs->bh->b_data;

			unlock_buffer(bs->bh);
			ea_bdebug(bs->bh, "cloning");
			s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
			error = -ENOMEM;
			if (s->base == NULL)
				goto cleanup;
			memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
			s->first = ENTRY(header(s->base)+1);
			header(s->base)->h_refcount = cpu_to_le32(1);
			s->here = ENTRY(s->base + offset);
			s->end = s->base + bs->bh->b_size;

			/*
			 * If existing entry points to an xattr inode, we need
			 * to prevent ext4_xattr_set_entry() from decrementing
			 * ref count on it because the reference belongs to the
			 * original block. In this case, make the entry look
			 * like it has an empty value.
			 */
			if (!s->not_found && s->here->e_value_inum) {
				/*
				 * Defer quota free call for previous inode
				 * until success is guaranteed.
				 */
				old_ea_inode_size = le32_to_cpu(
							s->here->e_value_size);
				s->here->e_value_inum = 0;
				s->here->e_value_size = 0;
			}
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
		/* assert(header == s->base) */
		error = -ENOMEM;
		if (s->base == NULL)
			goto cleanup;
		header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		header(s->base)->h_blocks = cpu_to_le32(1);
		header(s->base)->h_refcount = cpu_to_le32(1);
		s->first = ENTRY(header(s->base)+1);
		s->here = ENTRY(header(s->base)+1);
		s->end = s->base + sb->s_blocksize;
	}

	error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
	if (error == -EFSCORRUPTED)
		goto bad_block;
	if (error)
		goto cleanup;

	if (i->value && s->here->e_value_inum) {
		unsigned int ea_ino;

		/*
		 * A ref count on ea_inode has been taken as part of the call to
		 * ext4_xattr_set_entry() above. We would like to drop this
		 * extra ref but we have to wait until the xattr block is
		 * initialized and has its own ref count on the ea_inode.
		 */
		ea_ino = le32_to_cpu(s->here->e_value_inum);
		error = ext4_xattr_inode_iget(inode, ea_ino, &ea_inode);
		if (error) {
			ea_inode = NULL;
			goto cleanup;
		}
	}

inserted:
	if (!IS_LAST_ENTRY(s->first)) {
		new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
						     &ce);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == bs->bh)
				ea_bdebug(new_bh, "keeping");
			else {
				u32 ref;

				WARN_ON_ONCE(dquot_initialize_needed(inode));

				/* The old block is released after updating
				   the inode. */
				error = dquot_alloc_block(inode,
						EXT4_C2B(EXT4_SB(sb), 1));
				if (error)
					goto cleanup;
				BUFFER_TRACE(new_bh, "get_write_access");
				error = ext4_journal_get_write_access(handle,
								      new_bh);
				if (error)
					goto cleanup_dquot;
				lock_buffer(new_bh);
				/*
				 * We have to be careful about races with
				 * freeing, rehashing or adding references to
				 * xattr block. Once we hold buffer lock xattr
				 * block's state is stable so we can check
				 * whether the block got freed / rehashed or
				 * not. Since we unhash mbcache entry under
				 * buffer lock when freeing / rehashing xattr
				 * block, checking whether entry is still
				 * hashed is reliable. Same rules hold for
				 * e_reusable handling.
				 */
				if (hlist_bl_unhashed(&ce->e_hash_list) ||
				    !ce->e_reusable) {
					/*
					 * Undo everything and check mbcache
					 * again.
					 */
					unlock_buffer(new_bh);
					dquot_free_block(inode,
							 EXT4_C2B(EXT4_SB(sb),
								  1));
					brelse(new_bh);
					mb_cache_entry_put(ea_block_cache, ce);
					ce = NULL;
					new_bh = NULL;
					goto inserted;
				}
				ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
				BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
				if (ref >= EXT4_XATTR_REFCOUNT_MAX)
					ce->e_reusable = 0;
				ea_bdebug(new_bh, "reusing; refcount now=%d",
					  ref);
				ext4_xattr_block_csum_set(inode, new_bh);
				unlock_buffer(new_bh);
				error = ext4_handle_dirty_metadata(handle,
								   inode,
								   new_bh);
				if (error)
					goto cleanup_dquot;
			}
			mb_cache_entry_touch(ea_block_cache, ce);
			mb_cache_entry_put(ea_block_cache, ce);
			ce = NULL;
		} else if (bs->bh && s->base == bs->bh->b_data) {
			/* We were modifying this block in-place. */
			ea_bdebug(bs->bh, "keeping this block");
			new_bh = bs->bh;
			get_bh(new_bh);
		} else {
			/* We need to allocate a new block */
			ext4_fsblk_t goal, block;

			WARN_ON_ONCE(dquot_initialize_needed(inode));

			goal = ext4_group_first_block_no(sb,
						EXT4_I(inode)->i_block_group);

			/* non-extent files can't have physical blocks past 2^32 */
			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
				goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;

			block = ext4_new_meta_blocks(handle, inode, goal, 0,
						     NULL, &error);
			if (error)
				goto cleanup;

			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
				BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);

			ea_idebug(inode, "creating block %llu",
				  (unsigned long long)block);

			new_bh = sb_getblk(sb, block);
			if (unlikely(!new_bh)) {
				error = -ENOMEM;
getblk_failed:
				ext4_free_blocks(handle, inode, NULL, block, 1,
						 EXT4_FREE_BLOCKS_METADATA);
				goto cleanup;
			}
			error = ext4_xattr_inode_inc_ref_all(handle, inode,
						      ENTRY(header(s->base)+1));
			if (error)
				goto getblk_failed;
			if (ea_inode) {
				/* Drop the extra ref on ea_inode. */
				error = ext4_xattr_inode_dec_ref(handle,
								 ea_inode);
				if (error)
					ext4_warning_inode(ea_inode,
							   "dec ref error=%d",
							   error);
				iput(ea_inode);
				ea_inode = NULL;
			}

			lock_buffer(new_bh);
			error = ext4_journal_get_create_access(handle, new_bh);
			if (error) {
				unlock_buffer(new_bh);
				error = -EIO;
				goto getblk_failed;
			}
			memcpy(new_bh->b_data, s->base, new_bh->b_size);
			ext4_xattr_block_csum_set(inode, new_bh);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext4_xattr_block_cache_insert(ea_block_cache, new_bh);
			error = ext4_handle_dirty_metadata(handle, inode,
							   new_bh);
			if (error)
				goto cleanup;
		}
	}

	if (old_ea_inode_size)
		ext4_xattr_inode_free_quota(inode, old_ea_inode_size);

	/* Update the inode. */
	EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;

	/* Drop the previous xattr block. */
	if (bs->bh && bs->bh != new_bh) {
		struct ext4_xattr_inode_array *ea_inode_array = NULL;

		ext4_xattr_release_block(handle, inode, bs->bh,
					 &ea_inode_array,
					 0 /* extra_credits */);
		ext4_xattr_inode_array_free(ea_inode_array);
	}
	error = 0;

cleanup:
	if (ea_inode) {
		int error2;

		error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
		if (error2)
			ext4_warning_inode(ea_inode, "dec ref error=%d",
					   error2);

		/* If there was an error, revert the quota charge. */
		if (error)
			ext4_xattr_inode_free_quota(inode,
						    i_size_read(ea_inode));
		iput(ea_inode);
	}
	if (ce)
		mb_cache_entry_put(ea_block_cache, ce);
	brelse(new_bh);
	if (!(bs->bh && s->base == bs->bh->b_data))
		kfree(s->base);

	return error;

cleanup_dquot:
	dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
	goto cleanup;

bad_block:
	EXT4_ERROR_INODE(inode, "bad block %llu",
			 EXT4_I(inode)->i_file_acl);
	goto cleanup;

#undef header
}
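/*
 * Locate the in-inode xattr area and position @is->s.here at the named
 * attribute. A no-op when the inode has no extra space (i_extra_isize
 * is zero).
 */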
int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
			  struct ext4_xattr_ibody_find *is)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	int error;

	if (EXT4_I(inode)->i_extra_isize == 0)
		return 0;
	raw_inode = ext4_raw_inode(&is->iloc);
	header = IHDR(inode, raw_inode);
	is->s.base = is->s.first = IFIRST(header);
	is->s.here = is->s.first;
	is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		error = xattr_check_inode(inode, header, is->s.end);
		if (error)
			return error;
		/* Find the named attribute. */
		error = ext4_xattr_find_entry(&is->s.here, i->name_index,
					      i->name, 0);
		if (error && error != -ENODATA)
			return error;
		is->s.not_found = error;
	}
	return 0;
}
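/*
 * Set an xattr in the inode body, evicting inline data to make room if
 * the first attempt fails with -ENOSPC. Used on inodes that may carry
 * inline data; other callers use ext4_xattr_ibody_set() below.
 */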
int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
				struct ext4_xattr_info *i,
				struct ext4_xattr_ibody_find *is)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_search *s = &is->s;
	int error;

	if (EXT4_I(inode)->i_extra_isize == 0)
		return -ENOSPC;
	error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
	if (error) {
		if (error == -ENOSPC &&
		    ext4_has_inline_data(inode)) {
			error = ext4_try_to_evict_inline_data(handle, inode,
					EXT4_XATTR_LEN(strlen(i->name) +
					EXT4_XATTR_SIZE(i->value_len)));
			if (error)
				return error;
			error = ext4_xattr_ibody_find(inode, i, is);
			if (error)
				return error;
			error = ext4_xattr_set_entry(i, s, handle, inode,
						     false /* is_block */);
		}
		if (error)
			return error;
	}
	header = IHDR(inode, ext4_raw_inode(&is->iloc));
	if (!IS_LAST_ENTRY(s->first)) {
		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
	} else {
		header->h_magic = cpu_to_le32(0);
		ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
	}
	return 0;
}
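/*
 * Set (create, replace or remove) an xattr directly in the inode body.
 * The EXT4_STATE_XATTR flag and the ibody header magic are kept in sync
 * with whether any entries remain.
 */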
static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
				struct ext4_xattr_info *i,
				struct ext4_xattr_ibody_find *is)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_search *s = &is->s;
	int error;

	if (EXT4_I(inode)->i_extra_isize == 0)
		return -ENOSPC;
	error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
	if (error)
		return error;
	header = IHDR(inode, ext4_raw_inode(&is->iloc));
	if (!IS_LAST_ENTRY(s->first)) {
		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
	} else {
		header->h_magic = cpu_to_le32(0);
		ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
	}
	return 0;
}
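/*
 * Return true iff the new value in @i is byte-for-byte identical to the
 * value currently stored at @s->here. Values held in external xattr
 * inodes are never considered equal here.
 */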
static int ext4_xattr_value_same(struct ext4_xattr_search *s,
				 struct ext4_xattr_info *i)
{
	void *value;

	/* When e_value_inum is set the value is stored externally. */
	if (s->here->e_value_inum)
		return 0;
	if (le32_to_cpu(s->here->e_value_size) != i->value_len)
		return 0;
	value = ((void *)s->base) + le16_to_cpu(s->here->e_value_offs);
	return !memcmp(value, i->value, i->value_len);
}
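/*
 * Read and validate the inode's xattr block. Returns NULL when the
 * inode has none, the buffer head on success, or an ERR_PTR on failure.
 */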
static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
{
	struct buffer_head *bh;
	int error;

	if (!EXT4_I(inode)->i_file_acl)
		return NULL;
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh)
		return ERR_PTR(-EIO);
	error = ext4_xattr_check_block(inode, bh);
	if (error) {
		/* Don't leak the buffer reference on a corrupt block. */
		brelse(bh);
		return ERR_PTR(error);
	}
	return bh;
}
/*
 * ext4_xattr_set_handle()
 *
 * Create, replace or remove an extended attribute for this inode.  Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE specify that an
 * extended attribute must exist and must not exist prior to the call,
 * respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
		      const char *name, const void *value, size_t value_len,
		      int flags)
{
	struct ext4_xattr_info i = {
		.name_index = name_index,
		.name = name,
		.value = value,
		.value_len = value_len,
		.in_inode = 0,
	};
	struct ext4_xattr_ibody_find is = {
		.s = { .not_found = -ENODATA, },
	};
	struct ext4_xattr_block_find bs = {
		.s = { .not_found = -ENODATA, },
	};
	int no_expand;
	int error;

	if (!name)
		return -EINVAL;
	if (strlen(name) > 255)
		return -ERANGE;

	ext4_write_lock_xattr(inode, &no_expand);

	/* Check journal credits under write lock. */
	if (ext4_handle_valid(handle)) {
		struct buffer_head *bh;
		int credits;

		bh = ext4_xattr_get_block(inode);
		if (IS_ERR(bh)) {
			error = PTR_ERR(bh);
			goto cleanup;
		}

		credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh,
						   value_len,
						   flags & XATTR_CREATE);
		brelse(bh);

		if (!ext4_handle_has_enough_credits(handle, credits)) {
			error = -ENOSPC;
			goto cleanup;
		}
	}

	error = ext4_reserve_inode_write(handle, inode, &is.iloc);
	if (error)
		goto cleanup;

	if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
		struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
		ext4_clear_inode_state(inode, EXT4_STATE_NEW);
	}

	error = ext4_xattr_ibody_find(inode, &i, &is);
	if (error)
		goto cleanup;
	if (is.s.not_found)
		error = ext4_xattr_block_find(inode, &i, &bs);
	if (error)
		goto cleanup;
	if (is.s.not_found && bs.s.not_found) {
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (!value)
			goto cleanup;
	} else {
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
	}

	if (!value) {
		if (!is.s.not_found)
			error = ext4_xattr_ibody_set(handle, inode, &i, &is);
		else if (!bs.s.not_found)
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
	} else {
		error = 0;
		/* Xattr value did not change? Save us some work and bail out */
		if (!is.s.not_found && ext4_xattr_value_same(&is.s, &i))
			goto cleanup;
		if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i))
			goto cleanup;

		if (ext4_has_feature_ea_inode(inode->i_sb) &&
		    (EXT4_XATTR_SIZE(i.value_len) >
			EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize)))
			i.in_inode = 1;
retry_inode:
		error = ext4_xattr_ibody_set(handle, inode, &i, &is);
		if (!error && !bs.s.not_found) {
			i.value = NULL;
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
		} else if (error == -ENOSPC) {
			if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
				error = ext4_xattr_block_find(inode, &i, &bs);
				if (error)
					goto cleanup;
			}
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
			if (!error && !is.s.not_found) {
				i.value = NULL;
				error = ext4_xattr_ibody_set(handle, inode, &i,
							     &is);
			} else if (error == -ENOSPC) {
				/*
				 * Xattr does not fit in the block, store at
				 * external inode if possible.
				 */
				if (ext4_has_feature_ea_inode(inode->i_sb) &&
				    !i.in_inode) {
					i.in_inode = 1;
					goto retry_inode;
				}
			}
		}
	}
	if (!error) {
		ext4_xattr_update_super_block(handle, inode->i_sb);
		inode->i_ctime = current_time(inode);
		if (!value)
			no_expand = 0;
		error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
		/*
		 * The bh is consumed by ext4_mark_iloc_dirty, even with
		 * error != 0.
		 */
		is.iloc.bh = NULL;
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
	}

cleanup:
	brelse(is.iloc.bh);
	brelse(bs.bh);
	ext4_write_unlock_xattr(inode, &no_expand);
	return error;
}
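/*
 * Compute the journal credits needed to set an xattr of @value_len
 * bytes on @inode, taking the current xattr block (if any) into
 * account. Returns 0 with *credits filled in, or a negative error.
 */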
int ext4_xattr_set_credits(struct inode *inode, size_t value_len,
			   bool is_create, int *credits)
{
	struct buffer_head *bh;
	int err;

	*credits = 0;

	if (!EXT4_SB(inode->i_sb)->s_journal)
		return 0;

	down_read(&EXT4_I(inode)->xattr_sem);

	bh = ext4_xattr_get_block(inode);
	if (IS_ERR(bh)) {
		err = PTR_ERR(bh);
	} else {
		*credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh,
						    value_len, is_create);
		brelse(bh);
		err = 0;
	}

	up_read(&EXT4_I(inode)->xattr_sem);
	return err;
}
/*
 * ext4_xattr_set()
 *
 * Like ext4_xattr_set_handle, but start from an inode. This extended
 * attribute modification is a filesystem transaction by itself.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext4_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	handle_t *handle;
	struct super_block *sb = inode->i_sb;
	int error, retries = 0;
	int credits;

	error = dquot_initialize(inode);
	if (error)
		return error;

retry:
	error = ext4_xattr_set_credits(inode, value_len, flags & XATTR_CREATE,
				       &credits);
	if (error)
		return error;

	handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
	} else {
		int error2;

		error = ext4_xattr_set_handle(handle, inode, name_index, name,
					      value, value_len, flags);
		error2 = ext4_journal_stop(handle);
		if (error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		if (error == 0)
			error = error2;
	}

	return error;
}
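/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * in-kernel caller setting a "user." attribute would look roughly like
 *
 *	err = ext4_xattr_set(inode, EXT4_XATTR_INDEX_USER, "foo",
 *			     "bar", 3, 0);
 *
 * ext4_xattr_set() starts its own transaction and retries on -ENOSPC
 * for as long as ext4_should_retry_alloc() says the allocator may
 * eventually succeed.
 */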
/*
 * Shift the EA entries in the inode to create space for the increased
 * i_extra_isize.
 */
static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
				     int value_offs_shift, void *to,
				     void *from, size_t n)
{
	struct ext4_xattr_entry *last = entry;
	int new_offs;

	/* We always shift xattr headers further thus offsets get lower */
	BUG_ON(value_offs_shift > 0);

	/* Adjust the value offsets of the entries */
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_inum && last->e_value_size) {
			new_offs = le16_to_cpu(last->e_value_offs) +
							value_offs_shift;
			last->e_value_offs = cpu_to_le16(new_offs);
		}
	}

	/* Shift the entries by n bytes */
	memmove(to, from, n);
}
/*
 * Move xattr pointed to by 'entry' from inode into external xattr block
 */
static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
				    struct ext4_inode *raw_inode,
				    struct ext4_xattr_entry *entry)
{
	struct ext4_xattr_ibody_find *is = NULL;
	struct ext4_xattr_block_find *bs = NULL;
	char *buffer = NULL, *b_entry_name = NULL;
	size_t value_size = le32_to_cpu(entry->e_value_size);
	struct ext4_xattr_info i = {
		.value = NULL,
		.value_len = 0,
		.name_index = entry->e_name_index,
		.in_inode = !!entry->e_value_inum,
	};
	struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
	int error;

	is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
	bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
	buffer = kmalloc(value_size, GFP_NOFS);
	b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
	if (!is || !bs || !buffer || !b_entry_name) {
		error = -ENOMEM;
		goto out;
	}

	is->s.not_found = -ENODATA;
	bs->s.not_found = -ENODATA;
	is->iloc.bh = NULL;
	bs->bh = NULL;

	/* Save the entry name and the entry value */
	if (entry->e_value_inum) {
		error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
		if (error)
			goto out;
	} else {
		size_t value_offs = le16_to_cpu(entry->e_value_offs);
		memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size);
	}

	memcpy(b_entry_name, entry->e_name, entry->e_name_len);
	b_entry_name[entry->e_name_len] = '\0';
	i.name = b_entry_name;

	error = ext4_get_inode_loc(inode, &is->iloc);
	if (error)
		goto out;

	error = ext4_xattr_ibody_find(inode, &i, is);
	if (error)
		goto out;

	/* Remove the chosen entry from the inode */
	error = ext4_xattr_ibody_set(handle, inode, &i, is);
	if (error)
		goto out;

	i.value = buffer;
	i.value_len = value_size;
	error = ext4_xattr_block_find(inode, &i, bs);
	if (error)
		goto out;

	/* Add entry which was removed from the inode into the block */
	error = ext4_xattr_block_set(handle, inode, &i, bs);
	if (error)
		goto out;
	error = 0;
out:
	kfree(b_entry_name);
	kfree(buffer);
	if (is)
		brelse(is->iloc.bh);
	kfree(is);
	kfree(bs);

	return error;
}
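/*
 * Free up at least @isize_diff bytes in the inode body by repeatedly
 * moving the best-fitting entry out to the external xattr block.
 * @ifree and @bfree track the free space in the inode body and the
 * block, respectively.
 */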
static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
				       struct ext4_inode *raw_inode,
				       int isize_diff, size_t ifree,
				       size_t bfree, int *total_ino)
{
	struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
	struct ext4_xattr_entry *small_entry;
	struct ext4_xattr_entry *entry;
	struct ext4_xattr_entry *last;
	unsigned int entry_size;	/* EA entry size */
	unsigned int total_size;	/* EA entry size + value size */
	unsigned int min_total_size;
	int error;

	while (isize_diff > ifree) {
		entry = NULL;
		small_entry = NULL;
		min_total_size = ~0U;
		last = IFIRST(header);
		/* Find the entry best suited to be pushed into EA block */
		for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
			total_size = EXT4_XATTR_LEN(last->e_name_len);
			if (!last->e_value_inum)
				total_size += EXT4_XATTR_SIZE(
					       le32_to_cpu(last->e_value_size));
			if (total_size <= bfree &&
			    total_size < min_total_size) {
				if (total_size + ifree < isize_diff) {
					small_entry = last;
				} else {
					entry = last;
					min_total_size = total_size;
				}
			}
		}

		if (entry == NULL) {
			if (small_entry == NULL)
				return -ENOSPC;
			entry = small_entry;
		}

		entry_size = EXT4_XATTR_LEN(entry->e_name_len);
		total_size = entry_size;
		if (!entry->e_value_inum)
			total_size += EXT4_XATTR_SIZE(
					      le32_to_cpu(entry->e_value_size));
		error = ext4_xattr_move_to_block(handle, inode, raw_inode,
						 entry);
		if (error)
			return error;

		*total_ino -= entry_size;
		ifree += total_size;
		bfree -= total_size;
	}

	return 0;
}
/*
 * Expand an inode by new_extra_isize bytes when EAs are present.
 * Returns 0 on success or negative error number on failure.
 */
int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
			       struct ext4_inode *raw_inode, handle_t *handle)
{
	struct ext4_xattr_ibody_header *header;
	struct buffer_head *bh = NULL;
	size_t min_offs;
	size_t ifree, bfree;
	int total_ino;
	void *base, *end;
	int error = 0, tried_min_extra_isize = 0;
	int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
	int isize_diff;	/* How much do we need to grow i_extra_isize */
	int no_expand;

	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
		return 0;

retry:
	isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		goto out;

	header = IHDR(inode, raw_inode);

	/*
	 * Check if enough free space is available in the inode to shift the
	 * entries ahead by new_extra_isize.
	 */

	base = IFIRST(header);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	min_offs = end - base;
	total_ino = sizeof(struct ext4_xattr_ibody_header);

	error = xattr_check_inode(inode, header, end);
	if (error)
		goto cleanup;

	ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
	if (ifree >= isize_diff)
		goto shift;

	/*
	 * Enough free space isn't available in the inode, check if
	 * EA block can hold new_extra_isize bytes.
	 */
	if (EXT4_I(inode)->i_file_acl) {
		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		if (ext4_xattr_check_block(inode, bh)) {
			EXT4_ERROR_INODE(inode, "bad block %llu",
					 EXT4_I(inode)->i_file_acl);
			error = -EFSCORRUPTED;
			goto cleanup;
		}
		base = BHDR(bh);
		end = bh->b_data + bh->b_size;
		min_offs = end - base;
		bfree = ext4_xattr_free_space(BFIRST(bh), &min_offs, base,
					      NULL);
		if (bfree + ifree < isize_diff) {
			if (!tried_min_extra_isize && s_min_extra_isize) {
				tried_min_extra_isize++;
				new_extra_isize = s_min_extra_isize;
				brelse(bh);
				goto retry;
			}
			error = -ENOSPC;
			goto cleanup;
		}
	} else {
		bfree = inode->i_sb->s_blocksize;
	}

	error = ext4_xattr_make_inode_space(handle, inode, raw_inode,
					    isize_diff, ifree, bfree,
					    &total_ino);
	if (error) {
		if (error == -ENOSPC && !tried_min_extra_isize &&
		    s_min_extra_isize) {
			tried_min_extra_isize++;
			new_extra_isize = s_min_extra_isize;
			brelse(bh);
			goto retry;
		}
		goto cleanup;
	}

shift:
	/* Adjust the offsets and shift the remaining entries ahead */
	ext4_xattr_shift_entries(IFIRST(header), EXT4_I(inode)->i_extra_isize
			- new_extra_isize, (void *)raw_inode +
			EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
			(void *)header, total_ino);
	EXT4_I(inode)->i_extra_isize = new_extra_isize;
	brelse(bh);
out:
	ext4_write_unlock_xattr(inode, &no_expand);
	return 0;

cleanup:
	brelse(bh);
	/*
	 * Inode size expansion failed; don't try again
	 */
	no_expand = 1;
	ext4_write_unlock_xattr(inode, &no_expand);
	return error;
}
#define EIA_INCR 16 /* must be 2^n */
#define EIA_MASK (EIA_INCR - 1)

/* Add the large xattr @inode into @ea_inode_array for deferred iput().
 * If @ea_inode_array is new or full it will be grown and the old
 * contents copied over.
 */
static int
ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
			struct inode *inode)
{
	if (*ea_inode_array == NULL) {
		/*
		 * Start with 15 inodes, so it fits into a power-of-two size.
		 * If *ea_inode_array is NULL, this is essentially offsetof()
		 */
		(*ea_inode_array) =
			kmalloc(offsetof(struct ext4_xattr_inode_array,
					 inodes[EIA_MASK]),
				GFP_NOFS);
		if (*ea_inode_array == NULL)
			return -ENOMEM;
		(*ea_inode_array)->count = 0;
	} else if (((*ea_inode_array)->count & EIA_MASK) == EIA_MASK) {
		/* expand the array once all 15 + n * 16 slots are full */
		struct ext4_xattr_inode_array *new_array = NULL;
		int count = (*ea_inode_array)->count;

		/* if new_array is NULL, this is essentially offsetof() */
		new_array = kmalloc(
				offsetof(struct ext4_xattr_inode_array,
					 inodes[count + EIA_INCR]),
				GFP_NOFS);
		if (new_array == NULL)
			return -ENOMEM;
		memcpy(new_array, *ea_inode_array,
		       offsetof(struct ext4_xattr_inode_array, inodes[count]));
		kfree(*ea_inode_array);
		*ea_inode_array = new_array;
	}
	(*ea_inode_array)->inodes[(*ea_inode_array)->count++] = inode;
	return 0;
}
/*
 * ext4_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. Traverse
 * all entries and decrement reference on any xattr inodes associated with this
 * inode. This is called immediately before an inode is freed. We have exclusive
 * access to the inode. If an orphan inode is deleted it will also release its
 * references on xattr block and xattr inodes.
 */
int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
			    struct ext4_xattr_inode_array **ea_inode_array,
			    int extra_credits)
{
	struct buffer_head *bh = NULL;
	struct ext4_xattr_ibody_header *header;
	struct ext4_iloc iloc = { .bh = NULL };
	struct ext4_xattr_entry *entry;
	int error;

	error = ext4_xattr_ensure_credits(handle, inode, extra_credits,
					  NULL /* bh */,
					  false /* dirty */,
					  false /* block_csum */);
	if (error) {
		EXT4_ERROR_INODE(inode, "ensure credits (error %d)", error);
		goto cleanup;
	}

	if (ext4_has_feature_ea_inode(inode->i_sb) &&
	    ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {

		error = ext4_get_inode_loc(inode, &iloc);
		if (error) {
			EXT4_ERROR_INODE(inode, "inode loc (error %d)", error);
			goto cleanup;
		}

		error = ext4_journal_get_write_access(handle, iloc.bh);
		if (error) {
			EXT4_ERROR_INODE(inode, "write access (error %d)",
					 error);
			goto cleanup;
		}

		header = IHDR(inode, ext4_raw_inode(&iloc));
		if (header->h_magic == cpu_to_le32(EXT4_XATTR_MAGIC))
			ext4_xattr_inode_dec_ref_all(handle, inode, iloc.bh,
						     IFIRST(header),
						     false /* block_csum */,
						     ea_inode_array,
						     extra_credits,
						     false /* skip_quota */);
	}

	if (EXT4_I(inode)->i_file_acl) {
		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
		if (!bh) {
			EXT4_ERROR_INODE(inode, "block %llu read error",
					 EXT4_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}

		error = ext4_xattr_check_block(inode, bh);
		if (error) {
			EXT4_ERROR_INODE(inode, "bad block %llu (error %d)",
					 EXT4_I(inode)->i_file_acl, error);
			goto cleanup;
		}

		if (ext4_has_feature_ea_inode(inode->i_sb)) {
			for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
			     entry = EXT4_XATTR_NEXT(entry))
				if (entry->e_value_inum)
					ext4_xattr_inode_free_quota(inode,
					      le32_to_cpu(entry->e_value_size));
		}

		ext4_xattr_release_block(handle, inode, bh, ea_inode_array,
					 extra_credits);
		/*
		 * Update i_file_acl value in the same transaction that releases
		 * block.
		 */
		EXT4_I(inode)->i_file_acl = 0;
		error = ext4_mark_inode_dirty(handle, inode);
		if (error) {
			EXT4_ERROR_INODE(inode, "mark inode dirty (error %d)",
					 error);
			goto cleanup;
		}
	}
	error = 0;
cleanup:
	brelse(iloc.bh);
	brelse(bh);
	return error;
}
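/*
 * Drop the deferred references collected in @ea_inode_array and free
 * the array itself. Safe to call with a NULL array.
 */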
void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *ea_inode_array)
{
	int idx;

	if (ea_inode_array == NULL)
		return;

	for (idx = 0; idx < ea_inode_array->count; ++idx)
		iput(ea_inode_array->inodes[idx]);
	kfree(ea_inode_array);
}
/*
 * ext4_xattr_block_cache_insert()
 *
 * Create a new entry in the extended attribute block cache, and insert
 * it unless such an entry is already in the cache. Insertion is
 * best-effort: failures (including -EBUSY for an already-cached block)
 * are only reported via the debug output.
 */
static void
ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache,
			      struct buffer_head *bh)
{
	struct ext4_xattr_header *header = BHDR(bh);
	__u32 hash = le32_to_cpu(header->h_hash);
	int reusable = le32_to_cpu(header->h_refcount) <
		       EXT4_XATTR_REFCOUNT_MAX;
	int error;

	if (!ea_block_cache)
		return;
	error = mb_cache_entry_create(ea_block_cache, GFP_NOFS, hash,
				      bh->b_blocknr, reusable);
	if (error) {
		if (error == -EBUSY)
			ea_bdebug(bh, "already in cache");
	} else
		ea_bdebug(bh, "inserting [%x]", (int)hash);
}
/*
 * ext4_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal and 1 if they differ.
 */
static int
ext4_xattr_cmp(struct ext4_xattr_header *header1,
	       struct ext4_xattr_header *header2)
{
	struct ext4_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    entry1->e_value_inum != entry2->e_value_inum ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (!entry1->e_value_inum &&
		    memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT4_XATTR_NEXT(entry1);
		entry2 = EXT4_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}
/*
 * ext4_xattr_block_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a pointer to the block found, or NULL if such a block was
 * not found or an error occurred.
 */
static struct buffer_head *
ext4_xattr_block_cache_find(struct inode *inode,
			    struct ext4_xattr_header *header,
			    struct mb_cache_entry **pce)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (!ea_block_cache)
		return NULL;
	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
	ce = mb_cache_entry_find_first(ea_block_cache, hash);
	while (ce) {
		struct buffer_head *bh;

		bh = sb_bread(inode->i_sb, ce->e_value);
		if (!bh) {
			EXT4_ERROR_INODE(inode, "block %lu read error",
					 (unsigned long)ce->e_value);
		} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
			*pce = ce;
			return bh;
		}
		brelse(bh);
		ce = mb_cache_entry_find_next(ea_block_cache, ce);
	}
	return NULL;
}
#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext4_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
				    size_t value_count)
{
	__u32 hash = 0;

	while (name_len--) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}
	while (value_count--) {
		hash = (hash << VALUE_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
		       le32_to_cpu(*value++);
	}
	return cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext4_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext4_xattr_rehash(struct ext4_xattr_header *header)
{
	struct ext4_xattr_entry *here;
	__u32 hash = 0;

	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT4_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT
#define HASH_BUCKET_BITS 10
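/*
 * Allocate an mbcache with 2^HASH_BUCKET_BITS hash buckets, used to
 * track reusable xattr blocks and xattr inodes for deduplication.
 */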
struct mb_cache *
ext4_xattr_create_cache(void)
{
	return mb_cache_create(HASH_BUCKET_BITS);
}

void ext4_xattr_destroy_cache(struct mb_cache *cache)
{
	if (cache)
		mb_cache_destroy(cache);
}