xattr.c

// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
 *  and Andreas Gruenbacher <agruen@suse.de>.
 */

/*
 * Extended attributes are stored directly in inodes (on file systems with
 * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
 * field contains the block number if an inode uses an additional block. All
 * attributes must fit in the inode and one additional block. Blocks that
 * contain the identical set of attributes may be shared among several inodes.
 * Identical blocks are detected by keeping a cache of blocks that have
 * recently been accessed.
 *
 * The attributes in inodes and on blocks have a different header; the entries
 * are stored in the same format:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The header is followed by multiple entry descriptors. In disk blocks, the
 * entry descriptors are kept sorted. In inodes, they are unsorted. The
 * attribute values are aligned to the end of the block in no specific order.
 *
 * Locking strategy
 * ----------------
 * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count can change. Multiple writers to the same block are synchronized
 * by the buffer lock.
 */
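
/*
 * Reader-oriented sketch of the on-disk descriptors referred to above.
 * This is not a definition; the authoritative declarations live in
 * fs/ext4/xattr.h and may differ in detail:
 *
 *	struct ext4_xattr_header {
 *		__le32	h_magic;	// EXT4_XATTR_MAGIC
 *		__le32	h_refcount;	// inodes sharing this block
 *		__le32	h_blocks;	// number of blocks, checked to be 1
 *		__le32	h_hash;		// hash of all entries
 *		__le32	h_checksum;	// crc32c checksum of the block
 *		__u32	h_reserved[3];
 *	};
 *
 *	struct ext4_xattr_entry {
 *		__u8	e_name_len;
 *		__u8	e_name_index;	// EXT4_XATTR_INDEX_*
 *		__le16	e_value_offs;	// offset of value from value base
 *		__le32	e_value_inum;	// EA inode number, 0 if value local
 *		__le32	e_value_size;
 *		__le32	e_hash;		// hash of name and value
 *		char	e_name[];	// name, not null terminated
 *	};
 */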

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/iversion.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "acl.h"

#ifdef EXT4_XATTR_DEBUG
# define ea_idebug(inode, fmt, ...)					\
	printk(KERN_DEBUG "inode %s:%lu: " fmt "\n",			\
	       inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...)					\
	printk(KERN_DEBUG "block %pg:%lu: " fmt "\n",			\
	       bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
#else
# define ea_idebug(inode, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
#endif

static void ext4_xattr_block_cache_insert(struct mb_cache *,
					  struct buffer_head *);
static struct buffer_head *
ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *,
			    struct mb_cache_entry **);
static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
				    size_t value_count);
static void ext4_xattr_rehash(struct ext4_xattr_header *);

static const struct xattr_handler * const ext4_xattr_handler_map[] = {
	[EXT4_XATTR_INDEX_USER]		     = &ext4_xattr_user_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	[EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT4_XATTR_INDEX_TRUSTED]	     = &ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_SECURITY
	[EXT4_XATTR_INDEX_SECURITY]	     = &ext4_xattr_security_handler,
#endif
};

const struct xattr_handler *ext4_xattr_handlers[] = {
	&ext4_xattr_user_handler,
	&ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_EXT4_FS_SECURITY
	&ext4_xattr_security_handler,
#endif
	NULL
};

#define EA_BLOCK_CACHE(inode)	(((struct ext4_sb_info *) \
				inode->i_sb->s_fs_info)->s_ea_block_cache)

#define EA_INODE_CACHE(inode)	(((struct ext4_sb_info *) \
				inode->i_sb->s_fs_info)->s_ea_inode_cache)

static int
ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
			struct inode *inode);

#ifdef CONFIG_LOCKDEP
void ext4_xattr_inode_set_class(struct inode *ea_inode)
{
	lockdep_set_subclass(&ea_inode->i_rwsem, 1);
}
#endif
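
/*
 * The xattr block checksum computed below is a crc32c over the filesystem
 * checksum seed, the little-endian disk block number, and the block
 * contents with the h_checksum field itself treated as zero.
 */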
static __le32 ext4_xattr_block_csum(struct inode *inode,
				    sector_t block_nr,
				    struct ext4_xattr_header *hdr)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__le64 dsk_block_nr = cpu_to_le64(block_nr);
	__u32 dummy_csum = 0;
	int offset = offsetof(struct ext4_xattr_header, h_checksum);

	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
			   sizeof(dsk_block_nr));
	csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
	offset += sizeof(dummy_csum);
	csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
			   EXT4_BLOCK_SIZE(inode->i_sb) - offset);

	return cpu_to_le32(csum);
}

static int ext4_xattr_block_csum_verify(struct inode *inode,
					struct buffer_head *bh)
{
	struct ext4_xattr_header *hdr = BHDR(bh);
	int ret = 1;

	if (ext4_has_metadata_csum(inode->i_sb)) {
		lock_buffer(bh);
		ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
							bh->b_blocknr, hdr));
		unlock_buffer(bh);
	}
	return ret;
}

static void ext4_xattr_block_csum_set(struct inode *inode,
				      struct buffer_head *bh)
{
	if (ext4_has_metadata_csum(inode->i_sb))
		BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
						bh->b_blocknr, BHDR(bh));
}

static inline const struct xattr_handler *
ext4_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
		handler = ext4_xattr_handler_map[name_index];
	return handler;
}

static int
ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
			 void *value_start)
{
	struct ext4_xattr_entry *e = entry;

	/* Find the end of the names list */
	while (!IS_LAST_ENTRY(e)) {
		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
		if ((void *)next >= end)
			return -EFSCORRUPTED;
		e = next;
	}

	/* Check the values */
	while (!IS_LAST_ENTRY(entry)) {
		if (entry->e_value_size != 0 &&
		    entry->e_value_inum == 0) {
			u16 offs = le16_to_cpu(entry->e_value_offs);
			u32 size = le32_to_cpu(entry->e_value_size);
			void *value;

			/*
			 * The value cannot overlap the names, and the value
			 * with padding cannot extend beyond 'end'. Check both
			 * the padded and unpadded sizes, since the size may
			 * overflow to 0 when adding padding.
			 */
			if (offs > end - value_start)
				return -EFSCORRUPTED;
			value = value_start + offs;
			if (value < (void *)e + sizeof(u32) ||
			    size > end - value ||
			    EXT4_XATTR_SIZE(size) > end - value)
				return -EFSCORRUPTED;
		}
		entry = EXT4_XATTR_NEXT(entry);
	}

	return 0;
}

static inline int
ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
{
	int error;

	if (buffer_verified(bh))
		return 0;

	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1))
		return -EFSCORRUPTED;
	if (!ext4_xattr_block_csum_verify(inode, bh))
		return -EFSBADCRC;
	error = ext4_xattr_check_entries(BFIRST(bh), bh->b_data + bh->b_size,
					 bh->b_data);
	if (!error)
		set_buffer_verified(bh);
	return error;
}

static int
__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
			 void *end, const char *function, unsigned int line)
{
	int error = -EFSCORRUPTED;

	if (end - (void *)header < sizeof(*header) + sizeof(u32) ||
	    (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
		goto errout;
	error = ext4_xattr_check_entries(IFIRST(header), end, IFIRST(header));
errout:
	if (error)
		__ext4_error_inode(inode, function, line, 0,
				   "corrupted in-inode xattr");
	return error;
}

#define xattr_check_inode(inode, header, end) \
	__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
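
/*
 * Scan the entry list for the given (name_index, name) pair. Entries are
 * compared by attribute index first, then by name length, then by the name
 * bytes. When 'sorted' is set (the xattr block keeps its entries sorted),
 * the scan can stop as soon as a "larger" entry is seen; in-inode entries
 * are unsorted and need a full scan. Returns 0 with *pentry pointing at the
 * match, or -ENODATA if the attribute is not present.
 */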
static int
ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
		      const char *name, int sorted)
{
	struct ext4_xattr_entry *entry;
	size_t name_len;
	int cmp = 1;

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	entry = *pentry;
	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		cmp = name_index - entry->e_name_index;
		if (!cmp)
			cmp = name_len - entry->e_name_len;
		if (!cmp)
			cmp = memcmp(name, entry->e_name, name_len);
		if (cmp <= 0 && (sorted || cmp == 0))
			break;
	}
	*pentry = entry;
	return cmp ? -ENODATA : 0;
}

static u32
ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
{
	return ext4_chksum(sbi, sbi->s_csum_seed, buffer, size);
}
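
/*
 * EA inodes keep the attribute value in their data blocks, so several of
 * their timestamp/version fields are repurposed as metadata: the 64-bit
 * reference count is packed into i_ctime.tv_sec (upper 32 bits) and the raw
 * i_version (lower 32 bits), the value hash lives in i_atime.tv_sec, and old
 * Lustre-style EA inodes carry a back pointer to the parent inode in
 * i_mtime.tv_sec (see EXT4_XATTR_INODE_GET_PARENT below).
 */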
static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
{
	return ((u64)ea_inode->i_ctime.tv_sec << 32) |
	       (u32) inode_peek_iversion_raw(ea_inode);
}

static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
{
	ea_inode->i_ctime.tv_sec = (u32)(ref_count >> 32);
	inode_set_iversion_raw(ea_inode, ref_count & 0xffffffff);
}

static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode)
{
	return (u32)ea_inode->i_atime.tv_sec;
}

static void ext4_xattr_inode_set_hash(struct inode *ea_inode, u32 hash)
{
	ea_inode->i_atime.tv_sec = hash;
}

/*
 * Read the EA value from an inode.
 */
static int ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t size)
{
	int blocksize = 1 << ea_inode->i_blkbits;
	int bh_count = (size + blocksize - 1) >> ea_inode->i_blkbits;
	int tail_size = (size % blocksize) ?: blocksize;
	struct buffer_head *bhs_inline[8];
	struct buffer_head **bhs = bhs_inline;
	int i, ret;

	if (bh_count > ARRAY_SIZE(bhs_inline)) {
		bhs = kmalloc_array(bh_count, sizeof(*bhs), GFP_NOFS);
		if (!bhs)
			return -ENOMEM;
	}

	ret = ext4_bread_batch(ea_inode, 0 /* block */, bh_count,
			       true /* wait */, bhs);
	if (ret)
		goto free_bhs;

	for (i = 0; i < bh_count; i++) {
		/* There shouldn't be any holes in ea_inode. */
		if (!bhs[i]) {
			ret = -EFSCORRUPTED;
			goto put_bhs;
		}
		memcpy((char *)buf + blocksize * i, bhs[i]->b_data,
		       i < bh_count - 1 ? blocksize : tail_size);
	}
	ret = 0;
put_bhs:
	for (i = 0; i < bh_count; i++)
		brelse(bhs[i]);
free_bhs:
	if (bhs != bhs_inline)
		kfree(bhs);
	return ret;
}

#define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode)->i_mtime.tv_sec)

static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
				 u32 ea_inode_hash, struct inode **ea_inode)
{
	struct inode *inode;
	int err;

	inode = ext4_iget(parent->i_sb, ea_ino);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ext4_error(parent->i_sb,
			   "error while reading EA inode %lu err=%d", ea_ino,
			   err);
		return err;
	}

	if (is_bad_inode(inode)) {
		ext4_error(parent->i_sb,
			   "error while reading EA inode %lu is_bad_inode",
			   ea_ino);
		err = -EIO;
		goto error;
	}

	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		ext4_error(parent->i_sb,
			   "EA inode %lu does not have EXT4_EA_INODE_FL flag",
			   ea_ino);
		err = -EINVAL;
		goto error;
	}

	ext4_xattr_inode_set_class(inode);

	/*
	 * Check whether this is an old Lustre-style xattr inode. Lustre
	 * implementation does not have hash validation, rather it has a
	 * backpointer from ea_inode to the parent inode.
	 */
	if (ea_inode_hash != ext4_xattr_inode_get_hash(inode) &&
	    EXT4_XATTR_INODE_GET_PARENT(inode) == parent->i_ino &&
	    inode->i_generation == parent->i_generation) {
		ext4_set_inode_state(inode, EXT4_STATE_LUSTRE_EA_INODE);
		ext4_xattr_inode_set_ref(inode, 1);
	} else {
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
	}

	*ea_inode = inode;
	return 0;
error:
	iput(inode);
	return err;
}

static int
ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
			       struct ext4_xattr_entry *entry, void *buffer,
			       size_t size)
{
	u32 hash;

	/* Verify stored hash matches calculated hash. */
	hash = ext4_xattr_inode_hash(EXT4_SB(ea_inode->i_sb), buffer, size);
	if (hash != ext4_xattr_inode_get_hash(ea_inode))
		return -EFSCORRUPTED;

	if (entry) {
		__le32 e_hash, tmp_data;

		/* Verify entry hash. */
		tmp_data = cpu_to_le32(hash);
		e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len,
					       &tmp_data, 1);
		if (e_hash != entry->e_hash)
			return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * Read xattr value from the EA inode.
 */
static int
ext4_xattr_inode_get(struct inode *inode, struct ext4_xattr_entry *entry,
		     void *buffer, size_t size)
{
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
	struct inode *ea_inode;
	int err;

	err = ext4_xattr_inode_iget(inode, le32_to_cpu(entry->e_value_inum),
				    le32_to_cpu(entry->e_hash), &ea_inode);
	if (err) {
		ea_inode = NULL;
		goto out;
	}

	if (i_size_read(ea_inode) != size) {
		ext4_warning_inode(ea_inode,
				   "ea_inode file size=%llu entry size=%zu",
				   i_size_read(ea_inode), size);
		err = -EFSCORRUPTED;
		goto out;
	}

	err = ext4_xattr_inode_read(ea_inode, buffer, size);
	if (err)
		goto out;

	if (!ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE)) {
		err = ext4_xattr_inode_verify_hashes(ea_inode, entry, buffer,
						     size);
		if (err) {
			ext4_warning_inode(ea_inode,
					   "EA inode hash validation failed");
			goto out;
		}

		if (ea_inode_cache)
			mb_cache_entry_create(ea_inode_cache, GFP_NOFS,
					ext4_xattr_inode_get_hash(ea_inode),
					ea_inode->i_ino, true /* reusable */);
	}
out:
	iput(ea_inode);
	return err;
}

static int
ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext4_xattr_entry *entry;
	size_t size;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	error = -ENODATA;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(inode, bh)) {
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		error = -EFSCORRUPTED;
		goto cleanup;
	}
	ext4_xattr_block_cache_insert(ea_block_cache, bh);
	entry = BFIRST(bh);
	error = ext4_xattr_find_entry(&entry, name_index, name, 1);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		if (entry->e_value_inum) {
			error = ext4_xattr_inode_get(inode, entry, buffer,
						     size);
			if (error)
				goto cleanup;
		} else {
			memcpy(buffer, bh->b_data +
			       le16_to_cpu(entry->e_value_offs), size);
		}
	}
	error = size;

cleanup:
	brelse(bh);
	return error;
}

int
ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	size_t size;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return -ENODATA;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = xattr_check_inode(inode, header, end);
	if (error)
		goto cleanup;
	entry = IFIRST(header);
	error = ext4_xattr_find_entry(&entry, name_index, name, 0);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		if (entry->e_value_inum) {
			error = ext4_xattr_inode_get(inode, entry, buffer,
						     size);
			if (error)
				goto cleanup;
		} else {
			memcpy(buffer, (void *)IFIRST(header) +
			       le16_to_cpu(entry->e_value_offs), size);
		}
	}
	error = size;

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * ext4_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext4_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	int error;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (strlen(name) > 255)
		return -ERANGE;

	down_read(&EXT4_I(inode)->xattr_sem);
	error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
				     buffer_size);
	if (error == -ENODATA)
		error = ext4_xattr_block_get(inode, name_index, name, buffer,
					     buffer_size);
	up_read(&EXT4_I(inode)->xattr_sem);
	return error;
}
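
/*
 * Typical call pattern for the lookup above (illustrative sketch only, not
 * part of this file; "foo" is a made-up attribute name): query the required
 * size with a NULL buffer, then read the value into an allocation of that
 * size:
 *
 *	len = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER, "foo", NULL, 0);
 *	if (len > 0) {
 *		buf = kmalloc(len, GFP_KERNEL);
 *		if (buf)
 *			len = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER,
 *					     "foo", buf, len);
 *	}
 *
 * In-inode attributes are searched first; the external xattr block is only
 * consulted when the in-inode lookup returns -ENODATA.
 */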

static int
ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
			char *buffer, size_t buffer_size)
{
	size_t rest = buffer_size;

	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext4_xattr_handler(entry->e_name_index);

		if (handler && (!handler->list || handler->list(dentry))) {
			const char *prefix = handler->prefix ?: handler->name;
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest)
					return -ERANGE;
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	return buffer_size - rest; /* total size */
}

static int
ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	int error;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	error = 0;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(inode, bh)) {
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		error = -EFSCORRUPTED;
		goto cleanup;
	}
	ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);

cleanup:
	brelse(bh);

	return error;
}

static int
ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return 0;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = xattr_check_inode(inode, header, end);
	if (error)
		goto cleanup;
	error = ext4_xattr_list_entries(dentry, IFIRST(header),
					buffer, buffer_size);

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_rwsem: don't care
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
ssize_t
ext4_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	int ret, ret2;

	down_read(&EXT4_I(d_inode(dentry))->xattr_sem);
	ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	if (buffer) {
		buffer += ret;
		buffer_size -= ret;
	}
	ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	ret += ret2;
errout:
	up_read(&EXT4_I(d_inode(dentry))->xattr_sem);
	return ret;
}

/*
 * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext4_xattr_update_super_block(handle_t *handle,
					  struct super_block *sb)
{
	if (ext4_has_feature_xattr(sb))
		return;

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
		ext4_set_feature_xattr(sb);
		ext4_handle_dirty_super(handle, sb);
	}
}

int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
{
	struct ext4_iloc iloc = { .bh = NULL };
	struct buffer_head *bh = NULL;
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	qsize_t ea_inode_refs = 0;
	void *end;
	int ret;

	lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem);

	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		ret = ext4_get_inode_loc(inode, &iloc);
		if (ret)
			goto out;
		raw_inode = ext4_raw_inode(&iloc);
		header = IHDR(inode, raw_inode);
		end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
		ret = xattr_check_inode(inode, header, end);
		if (ret)
			goto out;

		for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
		     entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				ea_inode_refs++;
	}

	if (EXT4_I(inode)->i_file_acl) {
		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
		if (!bh) {
			ret = -EIO;
			goto out;
		}

		if (ext4_xattr_check_block(inode, bh)) {
			ret = -EFSCORRUPTED;
			goto out;
		}

		for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
		     entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				ea_inode_refs++;
	}
	*usage = ea_inode_refs + 1;
	ret = 0;
out:
	brelse(iloc.bh);
	brelse(bh);
	return ret;
}
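
/*
 * Quota for EA inode data is charged against the parent (owner) inode and in
 * whole clusters: the value length is rounded up to the cluster size (one
 * block when bigalloc is not in use) before space is allocated or freed.
 * Lustre-style EA inodes are exempt from the free path since they were never
 * charged here.
 */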
static inline size_t round_up_cluster(struct inode *inode, size_t length)
{
	struct super_block *sb = inode->i_sb;
	size_t cluster_size = 1 << (EXT4_SB(sb)->s_cluster_bits +
				    inode->i_blkbits);
	size_t mask = ~(cluster_size - 1);

	return (length + cluster_size - 1) & mask;
}

static int ext4_xattr_inode_alloc_quota(struct inode *inode, size_t len)
{
	int err;

	err = dquot_alloc_inode(inode);
	if (err)
		return err;
	err = dquot_alloc_space_nodirty(inode, round_up_cluster(inode, len));
	if (err)
		dquot_free_inode(inode);
	return err;
}

static void ext4_xattr_inode_free_quota(struct inode *parent,
					struct inode *ea_inode,
					size_t len)
{
	if (ea_inode &&
	    ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE))
		return;
	dquot_free_space_nodirty(parent, round_up_cluster(parent, len));
	dquot_free_inode(parent);
}

int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
			     struct buffer_head *block_bh, size_t value_len,
			     bool is_create)
{
	int credits;
	int blocks;

	/*
	 * 1) Owner inode update
	 * 2) Ref count update on old xattr block
	 * 3) new xattr block
	 * 4) block bitmap update for new xattr block
	 * 5) group descriptor for new xattr block
	 * 6) block bitmap update for old xattr block
	 * 7) group descriptor for old block
	 *
	 * 6 & 7 can happen if we have two racing threads T_a and T_b
	 * which are each trying to set an xattr on inodes I_a and I_b
	 * which were both initially sharing an xattr block.
	 */
	credits = 7;

	/* Quota updates. */
	credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(sb);

	/*
	 * In case of inline data, we may push out the data to a block,
	 * so we need to reserve credits for this eventuality
	 */
	if (inode && ext4_has_inline_data(inode))
		credits += ext4_writepage_trans_blocks(inode) + 1;

	/* We are done if ea_inode feature is not enabled. */
	if (!ext4_has_feature_ea_inode(sb))
		return credits;

	/* New ea_inode, inode map, block bitmap, group descriptor. */
	credits += 4;

	/* Data blocks. */
	blocks = (value_len + sb->s_blocksize - 1) >> sb->s_blocksize_bits;

	/* Indirection block or one level of extent tree. */
	blocks += 1;

	/* Block bitmap and group descriptor updates for each block. */
	credits += blocks * 2;

	/* Blocks themselves. */
	credits += blocks;

	if (!is_create) {
		/* Dereference ea_inode holding old xattr value.
		 * Old ea_inode, inode map, block bitmap, group descriptor.
		 */
		credits += 4;

		/* Data blocks for old ea_inode. */
		blocks = XATTR_SIZE_MAX >> sb->s_blocksize_bits;

		/* Indirection block or one level of extent tree for old
		 * ea_inode.
		 */
		blocks += 1;

		/* Block bitmap and group descriptor updates for each block. */
		credits += blocks * 2;
	}

	/* We may need to clone the existing xattr block in which case we need
	 * to increment ref counts for existing ea_inodes referenced by it.
	 */
	if (block_bh) {
		struct ext4_xattr_entry *entry = BFIRST(block_bh);

		for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				/* Ref count update on ea_inode. */
				credits += 1;
	}
	return credits;
}
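
/*
 * Make sure the running handle has at least 'credits' buffer credits left.
 * If it does not and cannot be extended, the handle is restarted; before the
 * restart, any pending changes to 'bh' are committed (recomputing the block
 * checksum when requested) and write access to 'bh' is re-acquired under the
 * new transaction.
 */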
static int ext4_xattr_ensure_credits(handle_t *handle, struct inode *inode,
				     int credits, struct buffer_head *bh,
				     bool dirty, bool block_csum)
{
	int error;

	if (!ext4_handle_valid(handle))
		return 0;

	if (handle->h_buffer_credits >= credits)
		return 0;

	error = ext4_journal_extend(handle, credits - handle->h_buffer_credits);
	if (!error)
		return 0;
	if (error < 0) {
		ext4_warning(inode->i_sb, "Extend journal (error %d)", error);
		return error;
	}

	if (bh && dirty) {
		if (block_csum)
			ext4_xattr_block_csum_set(inode, bh);
		error = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (error) {
			ext4_warning(inode->i_sb, "Handle metadata (error %d)",
				     error);
			return error;
		}
	}

	error = ext4_journal_restart(handle, credits);
	if (error) {
		ext4_warning(inode->i_sb, "Restart journal (error %d)", error);
		return error;
	}

	if (bh) {
		error = ext4_journal_get_write_access(handle, bh);
		if (error) {
			ext4_warning(inode->i_sb,
				     "Get write access failed (error %d)",
				     error);
			return error;
		}
	}
	return 0;
}
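
/*
 * Adjust the packed reference count of an EA inode by 'ref_change'. When the
 * first reference is taken, the inode is removed from the orphan list and
 * advertised in the EA inode cache again; when the last reference is dropped,
 * its link count is cleared and it is put on the orphan list so its blocks
 * can be reclaimed once the remaining users are done with it.
 */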
static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
				       int ref_change)
{
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode);
	struct ext4_iloc iloc;
	s64 ref_count;
	u32 hash;
	int ret;

	inode_lock(ea_inode);

	ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
	if (ret) {
		iloc.bh = NULL;
		goto out;
	}

	ref_count = ext4_xattr_inode_get_ref(ea_inode);
	ref_count += ref_change;
	ext4_xattr_inode_set_ref(ea_inode, ref_count);

	if (ref_change > 0) {
		WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld",
			  ea_inode->i_ino, ref_count);

		if (ref_count == 1) {
			WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u",
				  ea_inode->i_ino, ea_inode->i_nlink);

			set_nlink(ea_inode, 1);
			ext4_orphan_del(handle, ea_inode);

			if (ea_inode_cache) {
				hash = ext4_xattr_inode_get_hash(ea_inode);
				mb_cache_entry_create(ea_inode_cache,
						      GFP_NOFS, hash,
						      ea_inode->i_ino,
						      true /* reusable */);
			}
		}
	} else {
		WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
			  ea_inode->i_ino, ref_count);

		if (ref_count == 0) {
			WARN_ONCE(ea_inode->i_nlink != 1,
				  "EA inode %lu i_nlink=%u",
				  ea_inode->i_ino, ea_inode->i_nlink);

			clear_nlink(ea_inode);
			ext4_orphan_add(handle, ea_inode);

			if (ea_inode_cache) {
				hash = ext4_xattr_inode_get_hash(ea_inode);
				mb_cache_entry_delete(ea_inode_cache, hash,
						      ea_inode->i_ino);
			}
		}
	}

	ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc);
	iloc.bh = NULL;
	if (ret)
		ext4_warning_inode(ea_inode,
				   "ext4_mark_iloc_dirty() failed ret=%d", ret);
out:
	brelse(iloc.bh);
	inode_unlock(ea_inode);
	return ret;
}

static int ext4_xattr_inode_inc_ref(handle_t *handle, struct inode *ea_inode)
{
	return ext4_xattr_inode_update_ref(handle, ea_inode, 1);
}

static int ext4_xattr_inode_dec_ref(handle_t *handle, struct inode *ea_inode)
{
	return ext4_xattr_inode_update_ref(handle, ea_inode, -1);
}

static int ext4_xattr_inode_inc_ref_all(handle_t *handle, struct inode *parent,
					struct ext4_xattr_entry *first)
{
	struct inode *ea_inode;
	struct ext4_xattr_entry *entry;
	struct ext4_xattr_entry *failed_entry;
	unsigned int ea_ino;
	int err, saved_err;

	for (entry = first; !IS_LAST_ENTRY(entry);
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino,
					    le32_to_cpu(entry->e_hash),
					    &ea_inode);
		if (err)
			goto cleanup;
		err = ext4_xattr_inode_inc_ref(handle, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode, "inc ref error %d", err);
			iput(ea_inode);
			goto cleanup;
		}
		iput(ea_inode);
	}
	return 0;

cleanup:
	saved_err = err;
	failed_entry = entry;

	for (entry = first; entry != failed_entry;
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino,
					    le32_to_cpu(entry->e_hash),
					    &ea_inode);
		if (err) {
			ext4_warning(parent->i_sb,
				     "cleanup ea_ino %u iget error %d", ea_ino,
				     err);
			continue;
		}
		err = ext4_xattr_inode_dec_ref(handle, ea_inode);
		if (err)
			ext4_warning_inode(ea_inode, "cleanup dec ref error %d",
					   err);
		iput(ea_inode);
	}
	return saved_err;
}

static void
ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
			     struct buffer_head *bh,
			     struct ext4_xattr_entry *first, bool block_csum,
			     struct ext4_xattr_inode_array **ea_inode_array,
			     int extra_credits, bool skip_quota)
{
	struct inode *ea_inode;
	struct ext4_xattr_entry *entry;
	bool dirty = false;
	unsigned int ea_ino;
	int err;
	int credits;

	/* One credit for dec ref on ea_inode, one for orphan list addition, */
	credits = 2 + extra_credits;

	for (entry = first; !IS_LAST_ENTRY(entry);
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino,
					    le32_to_cpu(entry->e_hash),
					    &ea_inode);
		if (err)
			continue;

		err = ext4_expand_inode_array(ea_inode_array, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode,
					   "Expand inode array err=%d", err);
			iput(ea_inode);
			continue;
		}

		err = ext4_xattr_ensure_credits(handle, parent, credits, bh,
						dirty, block_csum);
		if (err) {
			ext4_warning_inode(ea_inode, "Ensure credits err=%d",
					   err);
			continue;
		}

		err = ext4_xattr_inode_dec_ref(handle, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode, "ea_inode dec ref err=%d",
					   err);
			continue;
		}

		if (!skip_quota)
			ext4_xattr_inode_free_quota(parent, ea_inode,
					      le32_to_cpu(entry->e_value_size));

		/*
		 * Forget about ea_inode within the same transaction that
		 * decrements the ref count. This avoids duplicate decrements in
		 * case the rest of the work spills over to subsequent
		 * transactions.
		 */
		entry->e_value_inum = 0;
		entry->e_value_size = 0;

		dirty = true;
	}

	if (dirty) {
		/*
		 * Note that we are deliberately skipping csum calculation for
		 * the final update because we do not expect any journal
		 * restarts until xattr block is freed.
		 */
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (err)
			ext4_warning_inode(parent,
					   "handle dirty metadata err=%d", err);
	}
}

/*
 * Release the xattr block BH: If the reference count is > 1, decrement it;
 * otherwise free the block.
 */
static void
ext4_xattr_release_block(handle_t *handle, struct inode *inode,
			 struct buffer_head *bh,
			 struct ext4_xattr_inode_array **ea_inode_array,
			 int extra_credits)
{
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
	u32 hash, ref;
	int error = 0;

	BUFFER_TRACE(bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, bh);
	if (error)
		goto out;

	lock_buffer(bh);
	hash = le32_to_cpu(BHDR(bh)->h_hash);
	ref = le32_to_cpu(BHDR(bh)->h_refcount);
	if (ref == 1) {
		ea_bdebug(bh, "refcount now=0; freeing");
		/*
		 * This must happen under buffer lock for
		 * ext4_xattr_block_set() to reliably detect freed block
		 */
		if (ea_block_cache)
			mb_cache_entry_delete(ea_block_cache, hash,
					      bh->b_blocknr);
		get_bh(bh);
		unlock_buffer(bh);

		if (ext4_has_feature_ea_inode(inode->i_sb))
			ext4_xattr_inode_dec_ref_all(handle, inode, bh,
						     BFIRST(bh),
						     true /* block_csum */,
						     ea_inode_array,
						     extra_credits,
						     true /* skip_quota */);
		ext4_free_blocks(handle, inode, bh, 0, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	} else {
		ref--;
		BHDR(bh)->h_refcount = cpu_to_le32(ref);
		if (ref == EXT4_XATTR_REFCOUNT_MAX - 1) {
			struct mb_cache_entry *ce;

			if (ea_block_cache) {
				ce = mb_cache_entry_get(ea_block_cache, hash,
							bh->b_blocknr);
				if (ce) {
					ce->e_reusable = 1;
					mb_cache_entry_put(ea_block_cache, ce);
				}
			}
		}

		ext4_xattr_block_csum_set(inode, bh);
		/*
		 * Beware of this ugliness: Releasing of xattr block references
		 * from different inodes can race and so we have to protect
		 * from a race where someone else frees the block (and releases
		 * its journal_head) before we are done dirtying the buffer. In
		 * nojournal mode this race is harmless and we actually cannot
		 * call ext4_handle_dirty_metadata() with locked buffer as
		 * that function can call sync_dirty_buffer() so for that case
		 * we handle the dirtying after unlocking the buffer.
		 */
		if (ext4_handle_valid(handle))
			error = ext4_handle_dirty_metadata(handle, inode, bh);
		unlock_buffer(bh);
		if (!ext4_handle_valid(handle))
			error = ext4_handle_dirty_metadata(handle, inode, bh);
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
		dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
		ea_bdebug(bh, "refcount now=%d; releasing",
			  le32_to_cpu(BHDR(bh)->h_refcount));
	}
out:
	ext4_std_error(inode->i_sb, error);
	return;
}

/*
 * Find the available free space for EAs. This also returns the total number of
 * bytes used by EA entries.
 */
static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
				    size_t *min_offs, void *base, int *total)
{
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_inum && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < *min_offs)
				*min_offs = offs;
		}
		if (total)
			*total += EXT4_XATTR_LEN(last->e_name_len);
	}
	return (*min_offs - ((void *)last - base) - sizeof(__u32));
}

/*
 * Write the value of the EA in an inode.
 */
static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
				  const void *buf, int bufsize)
{
	struct buffer_head *bh = NULL;
	unsigned long block = 0;
	int blocksize = ea_inode->i_sb->s_blocksize;
	int max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
	int csize, wsize = 0;
	int ret = 0;
	int retries = 0;

retry:
	while (ret >= 0 && ret < max_blocks) {
		struct ext4_map_blocks map;
		map.m_lblk = block += ret;
		map.m_len = max_blocks -= ret;

		ret = ext4_map_blocks(handle, ea_inode, &map,
				      EXT4_GET_BLOCKS_CREATE);
		if (ret <= 0) {
			ext4_mark_inode_dirty(handle, ea_inode);
			if (ret == -ENOSPC &&
			    ext4_should_retry_alloc(ea_inode->i_sb, &retries)) {
				ret = 0;
				goto retry;
			}
			break;
		}
	}

	if (ret < 0)
		return ret;

	block = 0;
	while (wsize < bufsize) {
		if (bh != NULL)
			brelse(bh);
		csize = (bufsize - wsize) > blocksize ? blocksize :
							bufsize - wsize;
		bh = ext4_getblk(handle, ea_inode, block, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		ret = ext4_journal_get_write_access(handle, bh);
		if (ret)
			goto out;

		memcpy(bh->b_data, buf, csize);
		set_buffer_uptodate(bh);
		ext4_handle_dirty_metadata(handle, ea_inode, bh);

		buf += csize;
		wsize += csize;
		block += 1;
	}

	inode_lock(ea_inode);
	i_size_write(ea_inode, wsize);
	ext4_update_i_disksize(ea_inode, wsize);
	inode_unlock(ea_inode);

	ext4_mark_inode_dirty(handle, ea_inode);

out:
	brelse(bh);

	return ret;
}

/*
 * Create an inode to store the value of a large EA.
 */
static struct inode *ext4_xattr_inode_create(handle_t *handle,
					     struct inode *inode, u32 hash)
{
	struct inode *ea_inode = NULL;
	uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) };
	int err;

	/*
	 * Let the next inode be the goal, so we try and allocate the EA inode
	 * in the same group, or nearby one.
	 */
	ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
				  S_IFREG | 0600, NULL, inode->i_ino + 1, owner,
				  EXT4_EA_INODE_FL);
	if (!IS_ERR(ea_inode)) {
		ea_inode->i_op = &ext4_file_inode_operations;
		ea_inode->i_fop = &ext4_file_operations;
		ext4_set_aops(ea_inode);
		ext4_xattr_inode_set_class(ea_inode);
		unlock_new_inode(ea_inode);
		ext4_xattr_inode_set_ref(ea_inode, 1);
		ext4_xattr_inode_set_hash(ea_inode, hash);
		err = ext4_mark_inode_dirty(handle, ea_inode);
		if (!err)
			err = ext4_inode_attach_jinode(ea_inode);
		if (err) {
			iput(ea_inode);
			return ERR_PTR(err);
		}

		/*
		 * Xattr inodes are shared therefore quota charging is performed
		 * at a higher level.
		 */
		dquot_free_inode(ea_inode);
		dquot_drop(ea_inode);
		inode_lock(ea_inode);
		ea_inode->i_flags |= S_NOQUOTA;
		inode_unlock(ea_inode);
	}

	return ea_inode;
}
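
/*
 * Look for an existing EA inode that already holds exactly this value so it
 * can be shared instead of allocating a new one. Candidates come from the EA
 * inode mbcache, keyed by the value hash; each candidate is then read back
 * and compared with memcmp() to rule out hash collisions.
 */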
  1265. static struct inode *
  1266. ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
  1267. size_t value_len, u32 hash)
  1268. {
  1269. struct inode *ea_inode;
  1270. struct mb_cache_entry *ce;
  1271. struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
  1272. void *ea_data;
  1273. if (!ea_inode_cache)
  1274. return NULL;
  1275. ce = mb_cache_entry_find_first(ea_inode_cache, hash);
  1276. if (!ce)
  1277. return NULL;
  1278. ea_data = ext4_kvmalloc(value_len, GFP_NOFS);
  1279. if (!ea_data) {
  1280. mb_cache_entry_put(ea_inode_cache, ce);
  1281. return NULL;
  1282. }
  1283. while (ce) {
  1284. ea_inode = ext4_iget(inode->i_sb, ce->e_value);
  1285. if (!IS_ERR(ea_inode) &&
  1286. !is_bad_inode(ea_inode) &&
  1287. (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
  1288. i_size_read(ea_inode) == value_len &&
  1289. !ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
  1290. !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
  1291. value_len) &&
  1292. !memcmp(value, ea_data, value_len)) {
  1293. mb_cache_entry_touch(ea_inode_cache, ce);
  1294. mb_cache_entry_put(ea_inode_cache, ce);
  1295. kvfree(ea_data);
  1296. return ea_inode;
  1297. }
  1298. if (!IS_ERR(ea_inode))
  1299. iput(ea_inode);
  1300. ce = mb_cache_entry_find_next(ea_inode_cache, ce);
  1301. }
  1302. kvfree(ea_data);
  1303. return NULL;
  1304. }
  1305. /*
  1306. * Add value of the EA in an inode.
  1307. */
  1308. static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
  1309. const void *value, size_t value_len,
  1310. struct inode **ret_inode)
  1311. {
  1312. struct inode *ea_inode;
  1313. u32 hash;
  1314. int err;
  1315. hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len);
  1316. ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash);
  1317. if (ea_inode) {
  1318. err = ext4_xattr_inode_inc_ref(handle, ea_inode);
  1319. if (err) {
  1320. iput(ea_inode);
  1321. return err;
  1322. }
  1323. *ret_inode = ea_inode;
  1324. return 0;
  1325. }
  1326. /* Create an inode for the EA value */
  1327. ea_inode = ext4_xattr_inode_create(handle, inode, hash);
  1328. if (IS_ERR(ea_inode))
  1329. return PTR_ERR(ea_inode);
  1330. err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
  1331. if (err) {
  1332. ext4_xattr_inode_dec_ref(handle, ea_inode);
  1333. iput(ea_inode);
  1334. return err;
  1335. }
  1336. if (EA_INODE_CACHE(inode))
  1337. mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash,
  1338. ea_inode->i_ino, true /* reusable */);
  1339. *ret_inode = ea_inode;
  1340. return 0;
  1341. }
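/*
 * In short: the helper above hashes the value, probes the per-superblock EA
 * inode mbcache for an existing inode holding an identical value (taking an
 * extra reference on a hit), and otherwise creates a fresh EA inode, writes
 * the value into it and publishes it in the cache as reusable.
 */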
  1342. /*
  1343. * Reserve min(block_size/8, 1024) bytes for xattr entries/names if ea_inode
  1344. * feature is enabled.
  1345. */
  1346. #define EXT4_XATTR_BLOCK_RESERVE(inode) min(i_blocksize(inode)/8, 1024U)
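/*
 * Illustrative values for the reserve above: with a 4 KiB block size it
 * comes to min(4096 / 8, 1024) = 512 bytes, and with a 1 KiB block size to
 * min(1024 / 8, 1024) = 128 bytes kept free for entry headers and names.
 */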
  1347. static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
  1348. struct ext4_xattr_search *s,
  1349. handle_t *handle, struct inode *inode,
  1350. bool is_block)
  1351. {
  1352. struct ext4_xattr_entry *last;
  1353. struct ext4_xattr_entry *here = s->here;
  1354. size_t min_offs = s->end - s->base, name_len = strlen(i->name);
  1355. int in_inode = i->in_inode;
  1356. struct inode *old_ea_inode = NULL;
  1357. struct inode *new_ea_inode = NULL;
  1358. size_t old_size, new_size;
  1359. int ret;
  1360. /* Space used by old and new values. */
  1361. old_size = (!s->not_found && !here->e_value_inum) ?
  1362. EXT4_XATTR_SIZE(le32_to_cpu(here->e_value_size)) : 0;
  1363. new_size = (i->value && !in_inode) ? EXT4_XATTR_SIZE(i->value_len) : 0;
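/*
 * Padded-size example (values are padded to the usual 4-byte xattr
 * alignment): EXT4_XATTR_SIZE(5) == EXT4_XATTR_SIZE(7) == 8, so replacing a
 * 5-byte value with a 7-byte one keeps old_size == new_size and takes the
 * in-place fast path below.
 */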
  1364. /*
  1365. * Optimization for the simple case when old and new values have the
  1366. * same padded sizes. Not applicable if external inodes are involved.
  1367. */
  1368. if (new_size && new_size == old_size) {
  1369. size_t offs = le16_to_cpu(here->e_value_offs);
  1370. void *val = s->base + offs;
  1371. here->e_value_size = cpu_to_le32(i->value_len);
  1372. if (i->value == EXT4_ZERO_XATTR_VALUE) {
  1373. memset(val, 0, new_size);
  1374. } else {
  1375. memcpy(val, i->value, i->value_len);
  1376. /* Clear padding bytes. */
  1377. memset(val + i->value_len, 0, new_size - i->value_len);
  1378. }
  1379. goto update_hash;
  1380. }
  1381. /* Compute min_offs and last. */
  1382. last = s->first;
  1383. for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
  1384. if (!last->e_value_inum && last->e_value_size) {
  1385. size_t offs = le16_to_cpu(last->e_value_offs);
  1386. if (offs < min_offs)
  1387. min_offs = offs;
  1388. }
  1389. }
  1390. /* Check whether we have enough space. */
  1391. if (i->value) {
  1392. size_t free;
  1393. free = min_offs - ((void *)last - s->base) - sizeof(__u32);
  1394. if (!s->not_found)
  1395. free += EXT4_XATTR_LEN(name_len) + old_size;
  1396. if (free < EXT4_XATTR_LEN(name_len) + new_size) {
  1397. ret = -ENOSPC;
  1398. goto out;
  1399. }
  1400. /*
  1401. * If storing the value in an external inode is an option,
  1402. * reserve space for xattr entries/names in the external
  1403. * attribute block so that a long value does not occupy the
1404. * whole space and prevent further entries from being added.
  1405. */
  1406. if (ext4_has_feature_ea_inode(inode->i_sb) &&
  1407. new_size && is_block &&
  1408. (min_offs + old_size - new_size) <
  1409. EXT4_XATTR_BLOCK_RESERVE(inode)) {
  1410. ret = -ENOSPC;
  1411. goto out;
  1412. }
  1413. }
  1414. /*
  1415. * Getting access to old and new ea inodes is subject to failures.
  1416. * Finish that work before doing any modifications to the xattr data.
  1417. */
  1418. if (!s->not_found && here->e_value_inum) {
  1419. ret = ext4_xattr_inode_iget(inode,
  1420. le32_to_cpu(here->e_value_inum),
  1421. le32_to_cpu(here->e_hash),
  1422. &old_ea_inode);
  1423. if (ret) {
  1424. old_ea_inode = NULL;
  1425. goto out;
  1426. }
  1427. }
  1428. if (i->value && in_inode) {
  1429. WARN_ON_ONCE(!i->value_len);
  1430. ret = ext4_xattr_inode_alloc_quota(inode, i->value_len);
  1431. if (ret)
  1432. goto out;
  1433. ret = ext4_xattr_inode_lookup_create(handle, inode, i->value,
  1434. i->value_len,
  1435. &new_ea_inode);
  1436. if (ret) {
  1437. new_ea_inode = NULL;
  1438. ext4_xattr_inode_free_quota(inode, NULL, i->value_len);
  1439. goto out;
  1440. }
  1441. }
  1442. if (old_ea_inode) {
  1443. /* We are ready to release ref count on the old_ea_inode. */
  1444. ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
  1445. if (ret) {
1446. /* Release the newly acquired ref count on new_ea_inode. */
  1447. if (new_ea_inode) {
  1448. int err;
  1449. err = ext4_xattr_inode_dec_ref(handle,
  1450. new_ea_inode);
  1451. if (err)
  1452. ext4_warning_inode(new_ea_inode,
  1453. "dec ref new_ea_inode err=%d",
  1454. err);
  1455. ext4_xattr_inode_free_quota(inode, new_ea_inode,
  1456. i->value_len);
  1457. }
  1458. goto out;
  1459. }
  1460. ext4_xattr_inode_free_quota(inode, old_ea_inode,
  1461. le32_to_cpu(here->e_value_size));
  1462. }
  1463. /* No failures allowed past this point. */
  1464. if (!s->not_found && here->e_value_offs) {
  1465. /* Remove the old value. */
  1466. void *first_val = s->base + min_offs;
  1467. size_t offs = le16_to_cpu(here->e_value_offs);
  1468. void *val = s->base + offs;
  1469. memmove(first_val + old_size, first_val, val - first_val);
  1470. memset(first_val, 0, old_size);
  1471. min_offs += old_size;
  1472. /* Adjust all value offsets. */
  1473. last = s->first;
  1474. while (!IS_LAST_ENTRY(last)) {
  1475. size_t o = le16_to_cpu(last->e_value_offs);
  1476. if (!last->e_value_inum &&
  1477. last->e_value_size && o < offs)
  1478. last->e_value_offs = cpu_to_le16(o + old_size);
  1479. last = EXT4_XATTR_NEXT(last);
  1480. }
  1481. }
  1482. if (!i->value) {
  1483. /* Remove old name. */
  1484. size_t size = EXT4_XATTR_LEN(name_len);
  1485. last = ENTRY((void *)last - size);
  1486. memmove(here, (void *)here + size,
  1487. (void *)last - (void *)here + sizeof(__u32));
  1488. memset(last, 0, size);
  1489. } else if (s->not_found) {
  1490. /* Insert new name. */
  1491. size_t size = EXT4_XATTR_LEN(name_len);
  1492. size_t rest = (void *)last - (void *)here + sizeof(__u32);
  1493. memmove((void *)here + size, here, rest);
  1494. memset(here, 0, size);
  1495. here->e_name_index = i->name_index;
  1496. here->e_name_len = name_len;
  1497. memcpy(here->e_name, i->name, name_len);
  1498. } else {
  1499. /* This is an update, reset value info. */
  1500. here->e_value_inum = 0;
  1501. here->e_value_offs = 0;
  1502. here->e_value_size = 0;
  1503. }
  1504. if (i->value) {
  1505. /* Insert new value. */
  1506. if (in_inode) {
  1507. here->e_value_inum = cpu_to_le32(new_ea_inode->i_ino);
  1508. } else if (i->value_len) {
  1509. void *val = s->base + min_offs - new_size;
  1510. here->e_value_offs = cpu_to_le16(min_offs - new_size);
  1511. if (i->value == EXT4_ZERO_XATTR_VALUE) {
  1512. memset(val, 0, new_size);
  1513. } else {
  1514. memcpy(val, i->value, i->value_len);
  1515. /* Clear padding bytes. */
  1516. memset(val + i->value_len, 0,
  1517. new_size - i->value_len);
  1518. }
  1519. }
  1520. here->e_value_size = cpu_to_le32(i->value_len);
  1521. }
  1522. update_hash:
  1523. if (i->value) {
  1524. __le32 hash = 0;
  1525. /* Entry hash calculation. */
  1526. if (in_inode) {
  1527. __le32 crc32c_hash;
  1528. /*
  1529. * Feed crc32c hash instead of the raw value for entry
1530. * hash calculation. This is to avoid walking the
1531. * potentially long value buffer again.
  1532. */
  1533. crc32c_hash = cpu_to_le32(
  1534. ext4_xattr_inode_get_hash(new_ea_inode));
  1535. hash = ext4_xattr_hash_entry(here->e_name,
  1536. here->e_name_len,
  1537. &crc32c_hash, 1);
  1538. } else if (is_block) {
  1539. __le32 *value = s->base + le16_to_cpu(
  1540. here->e_value_offs);
  1541. hash = ext4_xattr_hash_entry(here->e_name,
  1542. here->e_name_len, value,
  1543. new_size >> 2);
  1544. }
  1545. here->e_hash = hash;
  1546. }
  1547. if (is_block)
  1548. ext4_xattr_rehash((struct ext4_xattr_header *)s->base);
  1549. ret = 0;
  1550. out:
  1551. iput(old_ea_inode);
  1552. iput(new_ea_inode);
  1553. return ret;
  1554. }
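/*
 * Summary of ext4_xattr_set_entry(): resolve the old and new EA inodes first
 * (the only fallible steps), then compact out the old value, insert or
 * remove the name, copy in the new value or point the entry at the new EA
 * inode, and finally recompute the entry hash and, for block storage, rehash
 * the whole block.
 */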
  1555. struct ext4_xattr_block_find {
  1556. struct ext4_xattr_search s;
  1557. struct buffer_head *bh;
  1558. };
  1559. static int
  1560. ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
  1561. struct ext4_xattr_block_find *bs)
  1562. {
  1563. struct super_block *sb = inode->i_sb;
  1564. int error;
  1565. ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
  1566. i->name_index, i->name, i->value, (long)i->value_len);
  1567. if (EXT4_I(inode)->i_file_acl) {
  1568. /* The inode already has an extended attribute block. */
  1569. bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
  1570. error = -EIO;
  1571. if (!bs->bh)
  1572. goto cleanup;
  1573. ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
  1574. atomic_read(&(bs->bh->b_count)),
  1575. le32_to_cpu(BHDR(bs->bh)->h_refcount));
  1576. if (ext4_xattr_check_block(inode, bs->bh)) {
  1577. EXT4_ERROR_INODE(inode, "bad block %llu",
  1578. EXT4_I(inode)->i_file_acl);
  1579. error = -EFSCORRUPTED;
  1580. goto cleanup;
  1581. }
  1582. /* Find the named attribute. */
  1583. bs->s.base = BHDR(bs->bh);
  1584. bs->s.first = BFIRST(bs->bh);
  1585. bs->s.end = bs->bh->b_data + bs->bh->b_size;
  1586. bs->s.here = bs->s.first;
  1587. error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
  1588. i->name, 1);
  1589. if (error && error != -ENODATA)
  1590. goto cleanup;
  1591. bs->s.not_found = error;
  1592. }
  1593. error = 0;
  1594. cleanup:
  1595. return error;
  1596. }
  1597. static int
  1598. ext4_xattr_block_set(handle_t *handle, struct inode *inode,
  1599. struct ext4_xattr_info *i,
  1600. struct ext4_xattr_block_find *bs)
  1601. {
  1602. struct super_block *sb = inode->i_sb;
  1603. struct buffer_head *new_bh = NULL;
  1604. struct ext4_xattr_search s_copy = bs->s;
  1605. struct ext4_xattr_search *s = &s_copy;
  1606. struct mb_cache_entry *ce = NULL;
  1607. int error = 0;
  1608. struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
  1609. struct inode *ea_inode = NULL, *tmp_inode;
  1610. size_t old_ea_inode_quota = 0;
  1611. unsigned int ea_ino;
  1612. #define header(x) ((struct ext4_xattr_header *)(x))
  1613. if (s->base) {
  1614. BUFFER_TRACE(bs->bh, "get_write_access");
  1615. error = ext4_journal_get_write_access(handle, bs->bh);
  1616. if (error)
  1617. goto cleanup;
  1618. lock_buffer(bs->bh);
  1619. if (header(s->base)->h_refcount == cpu_to_le32(1)) {
  1620. __u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash);
  1621. /*
  1622. * This must happen under buffer lock for
  1623. * ext4_xattr_block_set() to reliably detect modified
  1624. * block
  1625. */
  1626. if (ea_block_cache)
  1627. mb_cache_entry_delete(ea_block_cache, hash,
  1628. bs->bh->b_blocknr);
  1629. ea_bdebug(bs->bh, "modifying in-place");
  1630. error = ext4_xattr_set_entry(i, s, handle, inode,
  1631. true /* is_block */);
  1632. ext4_xattr_block_csum_set(inode, bs->bh);
  1633. unlock_buffer(bs->bh);
  1634. if (error == -EFSCORRUPTED)
  1635. goto bad_block;
  1636. if (!error)
  1637. error = ext4_handle_dirty_metadata(handle,
  1638. inode,
  1639. bs->bh);
  1640. if (error)
  1641. goto cleanup;
  1642. goto inserted;
  1643. } else {
  1644. int offset = (char *)s->here - bs->bh->b_data;
  1645. unlock_buffer(bs->bh);
  1646. ea_bdebug(bs->bh, "cloning");
  1647. s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
  1648. error = -ENOMEM;
  1649. if (s->base == NULL)
  1650. goto cleanup;
  1651. memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
  1652. s->first = ENTRY(header(s->base)+1);
  1653. header(s->base)->h_refcount = cpu_to_le32(1);
  1654. s->here = ENTRY(s->base + offset);
  1655. s->end = s->base + bs->bh->b_size;
  1656. /*
  1657. * If existing entry points to an xattr inode, we need
  1658. * to prevent ext4_xattr_set_entry() from decrementing
  1659. * ref count on it because the reference belongs to the
  1660. * original block. In this case, make the entry look
  1661. * like it has an empty value.
  1662. */
  1663. if (!s->not_found && s->here->e_value_inum) {
  1664. ea_ino = le32_to_cpu(s->here->e_value_inum);
  1665. error = ext4_xattr_inode_iget(inode, ea_ino,
  1666. le32_to_cpu(s->here->e_hash),
  1667. &tmp_inode);
  1668. if (error)
  1669. goto cleanup;
  1670. if (!ext4_test_inode_state(tmp_inode,
  1671. EXT4_STATE_LUSTRE_EA_INODE)) {
  1672. /*
  1673. * Defer quota free call for previous
  1674. * inode until success is guaranteed.
  1675. */
  1676. old_ea_inode_quota = le32_to_cpu(
  1677. s->here->e_value_size);
  1678. }
  1679. iput(tmp_inode);
  1680. s->here->e_value_inum = 0;
  1681. s->here->e_value_size = 0;
  1682. }
  1683. }
  1684. } else {
  1685. /* Allocate a buffer where we construct the new block. */
  1686. s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
  1687. /* assert(header == s->base) */
  1688. error = -ENOMEM;
  1689. if (s->base == NULL)
  1690. goto cleanup;
  1691. header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
  1692. header(s->base)->h_blocks = cpu_to_le32(1);
  1693. header(s->base)->h_refcount = cpu_to_le32(1);
  1694. s->first = ENTRY(header(s->base)+1);
  1695. s->here = ENTRY(header(s->base)+1);
  1696. s->end = s->base + sb->s_blocksize;
  1697. }
  1698. error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
  1699. if (error == -EFSCORRUPTED)
  1700. goto bad_block;
  1701. if (error)
  1702. goto cleanup;
  1703. if (i->value && s->here->e_value_inum) {
  1704. /*
  1705. * A ref count on ea_inode has been taken as part of the call to
  1706. * ext4_xattr_set_entry() above. We would like to drop this
  1707. * extra ref but we have to wait until the xattr block is
  1708. * initialized and has its own ref count on the ea_inode.
  1709. */
  1710. ea_ino = le32_to_cpu(s->here->e_value_inum);
  1711. error = ext4_xattr_inode_iget(inode, ea_ino,
  1712. le32_to_cpu(s->here->e_hash),
  1713. &ea_inode);
  1714. if (error) {
  1715. ea_inode = NULL;
  1716. goto cleanup;
  1717. }
  1718. }
  1719. inserted:
  1720. if (!IS_LAST_ENTRY(s->first)) {
  1721. new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
  1722. &ce);
  1723. if (new_bh) {
  1724. /* We found an identical block in the cache. */
  1725. if (new_bh == bs->bh)
  1726. ea_bdebug(new_bh, "keeping");
  1727. else {
  1728. u32 ref;
  1729. WARN_ON_ONCE(dquot_initialize_needed(inode));
  1730. /* The old block is released after updating
  1731. the inode. */
  1732. error = dquot_alloc_block(inode,
  1733. EXT4_C2B(EXT4_SB(sb), 1));
  1734. if (error)
  1735. goto cleanup;
  1736. BUFFER_TRACE(new_bh, "get_write_access");
  1737. error = ext4_journal_get_write_access(handle,
  1738. new_bh);
  1739. if (error)
  1740. goto cleanup_dquot;
  1741. lock_buffer(new_bh);
  1742. /*
  1743. * We have to be careful about races with
  1744. * freeing, rehashing or adding references to
  1745. * xattr block. Once we hold buffer lock xattr
  1746. * block's state is stable so we can check
  1747. * whether the block got freed / rehashed or
  1748. * not. Since we unhash mbcache entry under
  1749. * buffer lock when freeing / rehashing xattr
  1750. * block, checking whether entry is still
  1751. * hashed is reliable. Same rules hold for
  1752. * e_reusable handling.
  1753. */
  1754. if (hlist_bl_unhashed(&ce->e_hash_list) ||
  1755. !ce->e_reusable) {
  1756. /*
  1757. * Undo everything and check mbcache
  1758. * again.
  1759. */
  1760. unlock_buffer(new_bh);
  1761. dquot_free_block(inode,
  1762. EXT4_C2B(EXT4_SB(sb),
  1763. 1));
  1764. brelse(new_bh);
  1765. mb_cache_entry_put(ea_block_cache, ce);
  1766. ce = NULL;
  1767. new_bh = NULL;
  1768. goto inserted;
  1769. }
  1770. ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
  1771. BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
  1772. if (ref >= EXT4_XATTR_REFCOUNT_MAX)
  1773. ce->e_reusable = 0;
  1774. ea_bdebug(new_bh, "reusing; refcount now=%d",
  1775. ref);
  1776. ext4_xattr_block_csum_set(inode, new_bh);
  1777. unlock_buffer(new_bh);
  1778. error = ext4_handle_dirty_metadata(handle,
  1779. inode,
  1780. new_bh);
  1781. if (error)
  1782. goto cleanup_dquot;
  1783. }
  1784. mb_cache_entry_touch(ea_block_cache, ce);
  1785. mb_cache_entry_put(ea_block_cache, ce);
  1786. ce = NULL;
  1787. } else if (bs->bh && s->base == bs->bh->b_data) {
  1788. /* We were modifying this block in-place. */
  1789. ea_bdebug(bs->bh, "keeping this block");
  1790. ext4_xattr_block_cache_insert(ea_block_cache, bs->bh);
  1791. new_bh = bs->bh;
  1792. get_bh(new_bh);
  1793. } else {
  1794. /* We need to allocate a new block */
  1795. ext4_fsblk_t goal, block;
  1796. WARN_ON_ONCE(dquot_initialize_needed(inode));
  1797. goal = ext4_group_first_block_no(sb,
  1798. EXT4_I(inode)->i_block_group);
  1799. /* non-extent files can't have physical blocks past 2^32 */
  1800. if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
  1801. goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
  1802. block = ext4_new_meta_blocks(handle, inode, goal, 0,
  1803. NULL, &error);
  1804. if (error)
  1805. goto cleanup;
  1806. if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
  1807. BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
  1808. ea_idebug(inode, "creating block %llu",
  1809. (unsigned long long)block);
  1810. new_bh = sb_getblk(sb, block);
  1811. if (unlikely(!new_bh)) {
  1812. error = -ENOMEM;
  1813. getblk_failed:
  1814. ext4_free_blocks(handle, inode, NULL, block, 1,
  1815. EXT4_FREE_BLOCKS_METADATA);
  1816. goto cleanup;
  1817. }
  1818. error = ext4_xattr_inode_inc_ref_all(handle, inode,
  1819. ENTRY(header(s->base)+1));
  1820. if (error)
  1821. goto getblk_failed;
  1822. if (ea_inode) {
  1823. /* Drop the extra ref on ea_inode. */
  1824. error = ext4_xattr_inode_dec_ref(handle,
  1825. ea_inode);
  1826. if (error)
  1827. ext4_warning_inode(ea_inode,
  1828. "dec ref error=%d",
  1829. error);
  1830. iput(ea_inode);
  1831. ea_inode = NULL;
  1832. }
  1833. lock_buffer(new_bh);
  1834. error = ext4_journal_get_create_access(handle, new_bh);
  1835. if (error) {
  1836. unlock_buffer(new_bh);
  1837. error = -EIO;
  1838. goto getblk_failed;
  1839. }
  1840. memcpy(new_bh->b_data, s->base, new_bh->b_size);
  1841. ext4_xattr_block_csum_set(inode, new_bh);
  1842. set_buffer_uptodate(new_bh);
  1843. unlock_buffer(new_bh);
  1844. ext4_xattr_block_cache_insert(ea_block_cache, new_bh);
  1845. error = ext4_handle_dirty_metadata(handle, inode,
  1846. new_bh);
  1847. if (error)
  1848. goto cleanup;
  1849. }
  1850. }
  1851. if (old_ea_inode_quota)
  1852. ext4_xattr_inode_free_quota(inode, NULL, old_ea_inode_quota);
  1853. /* Update the inode. */
  1854. EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
  1855. /* Drop the previous xattr block. */
  1856. if (bs->bh && bs->bh != new_bh) {
  1857. struct ext4_xattr_inode_array *ea_inode_array = NULL;
  1858. ext4_xattr_release_block(handle, inode, bs->bh,
  1859. &ea_inode_array,
  1860. 0 /* extra_credits */);
  1861. ext4_xattr_inode_array_free(ea_inode_array);
  1862. }
  1863. error = 0;
  1864. cleanup:
  1865. if (ea_inode) {
  1866. int error2;
  1867. error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
  1868. if (error2)
  1869. ext4_warning_inode(ea_inode, "dec ref error=%d",
  1870. error2);
  1871. /* If there was an error, revert the quota charge. */
  1872. if (error)
  1873. ext4_xattr_inode_free_quota(inode, ea_inode,
  1874. i_size_read(ea_inode));
  1875. iput(ea_inode);
  1876. }
  1877. if (ce)
  1878. mb_cache_entry_put(ea_block_cache, ce);
  1879. brelse(new_bh);
  1880. if (!(bs->bh && s->base == bs->bh->b_data))
  1881. kfree(s->base);
  1882. return error;
  1883. cleanup_dquot:
  1884. dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
  1885. goto cleanup;
  1886. bad_block:
  1887. EXT4_ERROR_INODE(inode, "bad block %llu",
  1888. EXT4_I(inode)->i_file_acl);
  1889. goto cleanup;
  1890. #undef header
  1891. }
  1892. int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
  1893. struct ext4_xattr_ibody_find *is)
  1894. {
  1895. struct ext4_xattr_ibody_header *header;
  1896. struct ext4_inode *raw_inode;
  1897. int error;
  1898. if (EXT4_I(inode)->i_extra_isize == 0)
  1899. return 0;
  1900. raw_inode = ext4_raw_inode(&is->iloc);
  1901. header = IHDR(inode, raw_inode);
  1902. is->s.base = is->s.first = IFIRST(header);
  1903. is->s.here = is->s.first;
  1904. is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
  1905. if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
  1906. error = xattr_check_inode(inode, header, is->s.end);
  1907. if (error)
  1908. return error;
  1909. /* Find the named attribute. */
  1910. error = ext4_xattr_find_entry(&is->s.here, i->name_index,
  1911. i->name, 0);
  1912. if (error && error != -ENODATA)
  1913. return error;
  1914. is->s.not_found = error;
  1915. }
  1916. return 0;
  1917. }
  1918. int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
  1919. struct ext4_xattr_info *i,
  1920. struct ext4_xattr_ibody_find *is)
  1921. {
  1922. struct ext4_xattr_ibody_header *header;
  1923. struct ext4_xattr_search *s = &is->s;
  1924. int error;
  1925. if (EXT4_I(inode)->i_extra_isize == 0)
  1926. return -ENOSPC;
  1927. error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
  1928. if (error) {
  1929. if (error == -ENOSPC &&
  1930. ext4_has_inline_data(inode)) {
  1931. error = ext4_try_to_evict_inline_data(handle, inode,
1932. EXT4_XATTR_LEN(strlen(i->name)) +
1933. EXT4_XATTR_SIZE(i->value_len));
  1934. if (error)
  1935. return error;
  1936. error = ext4_xattr_ibody_find(inode, i, is);
  1937. if (error)
  1938. return error;
  1939. error = ext4_xattr_set_entry(i, s, handle, inode,
  1940. false /* is_block */);
  1941. }
  1942. if (error)
  1943. return error;
  1944. }
  1945. header = IHDR(inode, ext4_raw_inode(&is->iloc));
  1946. if (!IS_LAST_ENTRY(s->first)) {
  1947. header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
  1948. ext4_set_inode_state(inode, EXT4_STATE_XATTR);
  1949. } else {
  1950. header->h_magic = cpu_to_le32(0);
  1951. ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
  1952. }
  1953. return 0;
  1954. }
  1955. static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
  1956. struct ext4_xattr_info *i,
  1957. struct ext4_xattr_ibody_find *is)
  1958. {
  1959. struct ext4_xattr_ibody_header *header;
  1960. struct ext4_xattr_search *s = &is->s;
  1961. int error;
  1962. if (EXT4_I(inode)->i_extra_isize == 0)
  1963. return -ENOSPC;
  1964. error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
  1965. if (error)
  1966. return error;
  1967. header = IHDR(inode, ext4_raw_inode(&is->iloc));
  1968. if (!IS_LAST_ENTRY(s->first)) {
  1969. header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
  1970. ext4_set_inode_state(inode, EXT4_STATE_XATTR);
  1971. } else {
  1972. header->h_magic = cpu_to_le32(0);
  1973. ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
  1974. }
  1975. return 0;
  1976. }
  1977. static int ext4_xattr_value_same(struct ext4_xattr_search *s,
  1978. struct ext4_xattr_info *i)
  1979. {
  1980. void *value;
  1981. /* When e_value_inum is set the value is stored externally. */
  1982. if (s->here->e_value_inum)
  1983. return 0;
  1984. if (le32_to_cpu(s->here->e_value_size) != i->value_len)
  1985. return 0;
  1986. value = ((void *)s->base) + le16_to_cpu(s->here->e_value_offs);
  1987. return !memcmp(value, i->value, i->value_len);
  1988. }
  1989. static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
  1990. {
  1991. struct buffer_head *bh;
  1992. int error;
  1993. if (!EXT4_I(inode)->i_file_acl)
  1994. return NULL;
  1995. bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
  1996. if (!bh)
  1997. return ERR_PTR(-EIO);
  1998. error = ext4_xattr_check_block(inode, bh);
  1999. if (error)
  2000. return ERR_PTR(error);
  2001. return bh;
  2002. }
  2003. /*
  2004. * ext4_xattr_set_handle()
  2005. *
  2006. * Create, replace or remove an extended attribute for this inode. Value
  2007. * is NULL to remove an existing extended attribute, and non-NULL to
  2008. * either replace an existing extended attribute, or create a new extended
2009. * attribute. The flags XATTR_REPLACE and XATTR_CREATE specify,
2010. * respectively, that an extended attribute must already exist and
2011. * that it must not yet exist prior to the call.
  2012. *
  2013. * Returns 0, or a negative error number on failure.
  2014. */
  2015. int
  2016. ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
  2017. const char *name, const void *value, size_t value_len,
  2018. int flags)
  2019. {
  2020. struct ext4_xattr_info i = {
  2021. .name_index = name_index,
  2022. .name = name,
  2023. .value = value,
  2024. .value_len = value_len,
  2025. .in_inode = 0,
  2026. };
  2027. struct ext4_xattr_ibody_find is = {
  2028. .s = { .not_found = -ENODATA, },
  2029. };
  2030. struct ext4_xattr_block_find bs = {
  2031. .s = { .not_found = -ENODATA, },
  2032. };
  2033. int no_expand;
  2034. int error;
  2035. if (!name)
  2036. return -EINVAL;
  2037. if (strlen(name) > 255)
  2038. return -ERANGE;
  2039. ext4_write_lock_xattr(inode, &no_expand);
  2040. /* Check journal credits under write lock. */
  2041. if (ext4_handle_valid(handle)) {
  2042. struct buffer_head *bh;
  2043. int credits;
  2044. bh = ext4_xattr_get_block(inode);
  2045. if (IS_ERR(bh)) {
  2046. error = PTR_ERR(bh);
  2047. goto cleanup;
  2048. }
  2049. credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh,
  2050. value_len,
  2051. flags & XATTR_CREATE);
  2052. brelse(bh);
  2053. if (!ext4_handle_has_enough_credits(handle, credits)) {
  2054. error = -ENOSPC;
  2055. goto cleanup;
  2056. }
  2057. }
  2058. error = ext4_reserve_inode_write(handle, inode, &is.iloc);
  2059. if (error)
  2060. goto cleanup;
  2061. if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
  2062. struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
  2063. memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
  2064. ext4_clear_inode_state(inode, EXT4_STATE_NEW);
  2065. }
  2066. error = ext4_xattr_ibody_find(inode, &i, &is);
  2067. if (error)
  2068. goto cleanup;
  2069. if (is.s.not_found)
  2070. error = ext4_xattr_block_find(inode, &i, &bs);
  2071. if (error)
  2072. goto cleanup;
  2073. if (is.s.not_found && bs.s.not_found) {
  2074. error = -ENODATA;
  2075. if (flags & XATTR_REPLACE)
  2076. goto cleanup;
  2077. error = 0;
  2078. if (!value)
  2079. goto cleanup;
  2080. } else {
  2081. error = -EEXIST;
  2082. if (flags & XATTR_CREATE)
  2083. goto cleanup;
  2084. }
  2085. if (!value) {
  2086. if (!is.s.not_found)
  2087. error = ext4_xattr_ibody_set(handle, inode, &i, &is);
  2088. else if (!bs.s.not_found)
  2089. error = ext4_xattr_block_set(handle, inode, &i, &bs);
  2090. } else {
  2091. error = 0;
  2092. /* Xattr value did not change? Save us some work and bail out */
  2093. if (!is.s.not_found && ext4_xattr_value_same(&is.s, &i))
  2094. goto cleanup;
  2095. if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i))
  2096. goto cleanup;
  2097. if (ext4_has_feature_ea_inode(inode->i_sb) &&
  2098. (EXT4_XATTR_SIZE(i.value_len) >
  2099. EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize)))
  2100. i.in_inode = 1;
  2101. retry_inode:
  2102. error = ext4_xattr_ibody_set(handle, inode, &i, &is);
  2103. if (!error && !bs.s.not_found) {
  2104. i.value = NULL;
  2105. error = ext4_xattr_block_set(handle, inode, &i, &bs);
  2106. } else if (error == -ENOSPC) {
  2107. if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
  2108. error = ext4_xattr_block_find(inode, &i, &bs);
  2109. if (error)
  2110. goto cleanup;
  2111. }
  2112. error = ext4_xattr_block_set(handle, inode, &i, &bs);
  2113. if (!error && !is.s.not_found) {
  2114. i.value = NULL;
  2115. error = ext4_xattr_ibody_set(handle, inode, &i,
  2116. &is);
  2117. } else if (error == -ENOSPC) {
  2118. /*
  2119. * Xattr does not fit in the block, store at
  2120. * external inode if possible.
  2121. */
  2122. if (ext4_has_feature_ea_inode(inode->i_sb) &&
  2123. !i.in_inode) {
  2124. i.in_inode = 1;
  2125. goto retry_inode;
  2126. }
  2127. }
  2128. }
  2129. }
  2130. if (!error) {
  2131. ext4_xattr_update_super_block(handle, inode->i_sb);
  2132. inode->i_ctime = current_time(inode);
  2133. if (!value)
  2134. no_expand = 0;
  2135. error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
  2136. /*
  2137. * The bh is consumed by ext4_mark_iloc_dirty, even with
  2138. * error != 0.
  2139. */
  2140. is.iloc.bh = NULL;
  2141. if (IS_SYNC(inode))
  2142. ext4_handle_sync(handle);
  2143. }
  2144. cleanup:
  2145. brelse(is.iloc.bh);
  2146. brelse(bs.bh);
  2147. ext4_write_unlock_xattr(inode, &no_expand);
  2148. return error;
  2149. }
  2150. int ext4_xattr_set_credits(struct inode *inode, size_t value_len,
  2151. bool is_create, int *credits)
  2152. {
  2153. struct buffer_head *bh;
  2154. int err;
  2155. *credits = 0;
  2156. if (!EXT4_SB(inode->i_sb)->s_journal)
  2157. return 0;
  2158. down_read(&EXT4_I(inode)->xattr_sem);
  2159. bh = ext4_xattr_get_block(inode);
  2160. if (IS_ERR(bh)) {
  2161. err = PTR_ERR(bh);
  2162. } else {
  2163. *credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh,
  2164. value_len, is_create);
  2165. brelse(bh);
  2166. err = 0;
  2167. }
  2168. up_read(&EXT4_I(inode)->xattr_sem);
  2169. return err;
  2170. }
  2171. /*
  2172. * ext4_xattr_set()
  2173. *
  2174. * Like ext4_xattr_set_handle, but start from an inode. This extended
  2175. * attribute modification is a filesystem transaction by itself.
  2176. *
  2177. * Returns 0, or a negative error number on failure.
  2178. */
  2179. int
  2180. ext4_xattr_set(struct inode *inode, int name_index, const char *name,
  2181. const void *value, size_t value_len, int flags)
  2182. {
  2183. handle_t *handle;
  2184. struct super_block *sb = inode->i_sb;
  2185. int error, retries = 0;
  2186. int credits;
  2187. error = dquot_initialize(inode);
  2188. if (error)
  2189. return error;
  2190. retry:
  2191. error = ext4_xattr_set_credits(inode, value_len, flags & XATTR_CREATE,
  2192. &credits);
  2193. if (error)
  2194. return error;
  2195. handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
  2196. if (IS_ERR(handle)) {
  2197. error = PTR_ERR(handle);
  2198. } else {
  2199. int error2;
  2200. error = ext4_xattr_set_handle(handle, inode, name_index, name,
  2201. value, value_len, flags);
  2202. error2 = ext4_journal_stop(handle);
  2203. if (error == -ENOSPC &&
  2204. ext4_should_retry_alloc(sb, &retries))
  2205. goto retry;
  2206. if (error == 0)
  2207. error = error2;
  2208. }
  2209. return error;
  2210. }
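/*
 * For reference, ext4_xattr_set() is what the setxattr(2) family of system
 * calls ultimately reaches through the VFS. An illustrative userspace sketch
 * (not part of this file):
 *
 *	#include <sys/xattr.h>
 *
 *	// Create "user.comment" only if it does not already exist; the VFS
 *	// passes XATTR_CREATE down and the set path returns -EEXIST when the
 *	// attribute is already present.
 *	if (setxattr("/mnt/test/file", "user.comment", "hello", 5,
 *		     XATTR_CREATE) != 0)
 *		perror("setxattr");
 */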
  2211. /*
  2212. * Shift the EA entries in the inode to create space for the increased
  2213. * i_extra_isize.
  2214. */
  2215. static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
  2216. int value_offs_shift, void *to,
  2217. void *from, size_t n)
  2218. {
  2219. struct ext4_xattr_entry *last = entry;
  2220. int new_offs;
2221. /* We always shift xattr entry headers further into the inode, so value offsets only get lower */
  2222. BUG_ON(value_offs_shift > 0);
  2223. /* Adjust the value offsets of the entries */
  2224. for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
  2225. if (!last->e_value_inum && last->e_value_size) {
  2226. new_offs = le16_to_cpu(last->e_value_offs) +
  2227. value_offs_shift;
  2228. last->e_value_offs = cpu_to_le16(new_offs);
  2229. }
  2230. }
  2231. /* Shift the entries by n bytes */
  2232. memmove(to, from, n);
  2233. }
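/*
 * Worked example with assumed numbers: growing i_extra_isize from 32 to 64
 * gives value_offs_shift == -32, so an entry whose e_value_offs was 200
 * becomes 168, and the entries themselves are memmove()d 32 bytes further
 * into the inode while the values packed at the end of the inode stay put.
 */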
  2234. /*
  2235. * Move xattr pointed to by 'entry' from inode into external xattr block
  2236. */
  2237. static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
  2238. struct ext4_inode *raw_inode,
  2239. struct ext4_xattr_entry *entry)
  2240. {
  2241. struct ext4_xattr_ibody_find *is = NULL;
  2242. struct ext4_xattr_block_find *bs = NULL;
  2243. char *buffer = NULL, *b_entry_name = NULL;
  2244. size_t value_size = le32_to_cpu(entry->e_value_size);
  2245. struct ext4_xattr_info i = {
  2246. .value = NULL,
  2247. .value_len = 0,
  2248. .name_index = entry->e_name_index,
  2249. .in_inode = !!entry->e_value_inum,
  2250. };
  2251. struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
  2252. int error;
  2253. is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
  2254. bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
  2255. buffer = kmalloc(value_size, GFP_NOFS);
  2256. b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
  2257. if (!is || !bs || !buffer || !b_entry_name) {
  2258. error = -ENOMEM;
  2259. goto out;
  2260. }
  2261. is->s.not_found = -ENODATA;
  2262. bs->s.not_found = -ENODATA;
  2263. is->iloc.bh = NULL;
  2264. bs->bh = NULL;
  2265. /* Save the entry name and the entry value */
  2266. if (entry->e_value_inum) {
  2267. error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
  2268. if (error)
  2269. goto out;
  2270. } else {
  2271. size_t value_offs = le16_to_cpu(entry->e_value_offs);
  2272. memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size);
  2273. }
  2274. memcpy(b_entry_name, entry->e_name, entry->e_name_len);
  2275. b_entry_name[entry->e_name_len] = '\0';
  2276. i.name = b_entry_name;
  2277. error = ext4_get_inode_loc(inode, &is->iloc);
  2278. if (error)
  2279. goto out;
  2280. error = ext4_xattr_ibody_find(inode, &i, is);
  2281. if (error)
  2282. goto out;
  2283. /* Remove the chosen entry from the inode */
  2284. error = ext4_xattr_ibody_set(handle, inode, &i, is);
  2285. if (error)
  2286. goto out;
  2287. i.value = buffer;
  2288. i.value_len = value_size;
  2289. error = ext4_xattr_block_find(inode, &i, bs);
  2290. if (error)
  2291. goto out;
  2292. /* Add entry which was removed from the inode into the block */
  2293. error = ext4_xattr_block_set(handle, inode, &i, bs);
  2294. if (error)
  2295. goto out;
  2296. error = 0;
  2297. out:
  2298. kfree(b_entry_name);
  2299. kfree(buffer);
  2300. if (is)
  2301. brelse(is->iloc.bh);
  2302. kfree(is);
  2303. kfree(bs);
  2304. return error;
  2305. }
  2306. static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
  2307. struct ext4_inode *raw_inode,
  2308. int isize_diff, size_t ifree,
  2309. size_t bfree, int *total_ino)
  2310. {
  2311. struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
  2312. struct ext4_xattr_entry *small_entry;
  2313. struct ext4_xattr_entry *entry;
  2314. struct ext4_xattr_entry *last;
  2315. unsigned int entry_size; /* EA entry size */
  2316. unsigned int total_size; /* EA entry size + value size */
  2317. unsigned int min_total_size;
  2318. int error;
  2319. while (isize_diff > ifree) {
  2320. entry = NULL;
  2321. small_entry = NULL;
  2322. min_total_size = ~0U;
  2323. last = IFIRST(header);
  2324. /* Find the entry best suited to be pushed into EA block */
  2325. for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
  2326. total_size = EXT4_XATTR_LEN(last->e_name_len);
  2327. if (!last->e_value_inum)
  2328. total_size += EXT4_XATTR_SIZE(
  2329. le32_to_cpu(last->e_value_size));
  2330. if (total_size <= bfree &&
  2331. total_size < min_total_size) {
  2332. if (total_size + ifree < isize_diff) {
  2333. small_entry = last;
  2334. } else {
  2335. entry = last;
  2336. min_total_size = total_size;
  2337. }
  2338. }
  2339. }
  2340. if (entry == NULL) {
  2341. if (small_entry == NULL)
  2342. return -ENOSPC;
  2343. entry = small_entry;
  2344. }
  2345. entry_size = EXT4_XATTR_LEN(entry->e_name_len);
  2346. total_size = entry_size;
  2347. if (!entry->e_value_inum)
  2348. total_size += EXT4_XATTR_SIZE(
  2349. le32_to_cpu(entry->e_value_size));
  2350. error = ext4_xattr_move_to_block(handle, inode, raw_inode,
  2351. entry);
  2352. if (error)
  2353. return error;
  2354. *total_ino -= entry_size;
  2355. ifree += total_size;
  2356. bfree -= total_size;
  2357. }
  2358. return 0;
  2359. }
  2360. /*
  2361. * Expand an inode by new_extra_isize bytes when EAs are present.
  2362. * Returns 0 on success or negative error number on failure.
  2363. */
  2364. int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
  2365. struct ext4_inode *raw_inode, handle_t *handle)
  2366. {
  2367. struct ext4_xattr_ibody_header *header;
  2368. struct buffer_head *bh;
  2369. struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  2370. static unsigned int mnt_count;
  2371. size_t min_offs;
  2372. size_t ifree, bfree;
  2373. int total_ino;
  2374. void *base, *end;
  2375. int error = 0, tried_min_extra_isize = 0;
  2376. int s_min_extra_isize = le16_to_cpu(sbi->s_es->s_min_extra_isize);
  2377. int isize_diff; /* How much do we need to grow i_extra_isize */
  2378. retry:
  2379. isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
  2380. if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
  2381. return 0;
  2382. header = IHDR(inode, raw_inode);
  2383. /*
  2384. * Check if enough free space is available in the inode to shift the
  2385. * entries ahead by new_extra_isize.
  2386. */
  2387. base = IFIRST(header);
  2388. end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
  2389. min_offs = end - base;
  2390. total_ino = sizeof(struct ext4_xattr_ibody_header);
  2391. error = xattr_check_inode(inode, header, end);
  2392. if (error)
  2393. goto cleanup;
  2394. ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
  2395. if (ifree >= isize_diff)
  2396. goto shift;
  2397. /*
  2398. * Enough free space isn't available in the inode, check if
  2399. * EA block can hold new_extra_isize bytes.
  2400. */
  2401. if (EXT4_I(inode)->i_file_acl) {
  2402. bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
  2403. error = -EIO;
  2404. if (!bh)
  2405. goto cleanup;
  2406. if (ext4_xattr_check_block(inode, bh)) {
  2407. EXT4_ERROR_INODE(inode, "bad block %llu",
  2408. EXT4_I(inode)->i_file_acl);
  2409. error = -EFSCORRUPTED;
  2410. brelse(bh);
  2411. goto cleanup;
  2412. }
  2413. base = BHDR(bh);
  2414. end = bh->b_data + bh->b_size;
  2415. min_offs = end - base;
  2416. bfree = ext4_xattr_free_space(BFIRST(bh), &min_offs, base,
  2417. NULL);
  2418. brelse(bh);
  2419. if (bfree + ifree < isize_diff) {
  2420. if (!tried_min_extra_isize && s_min_extra_isize) {
  2421. tried_min_extra_isize++;
  2422. new_extra_isize = s_min_extra_isize;
  2423. goto retry;
  2424. }
  2425. error = -ENOSPC;
  2426. goto cleanup;
  2427. }
  2428. } else {
  2429. bfree = inode->i_sb->s_blocksize;
  2430. }
  2431. error = ext4_xattr_make_inode_space(handle, inode, raw_inode,
  2432. isize_diff, ifree, bfree,
  2433. &total_ino);
  2434. if (error) {
  2435. if (error == -ENOSPC && !tried_min_extra_isize &&
  2436. s_min_extra_isize) {
  2437. tried_min_extra_isize++;
  2438. new_extra_isize = s_min_extra_isize;
  2439. goto retry;
  2440. }
  2441. goto cleanup;
  2442. }
  2443. shift:
  2444. /* Adjust the offsets and shift the remaining entries ahead */
  2445. ext4_xattr_shift_entries(IFIRST(header), EXT4_I(inode)->i_extra_isize
  2446. - new_extra_isize, (void *)raw_inode +
  2447. EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
  2448. (void *)header, total_ino);
  2449. EXT4_I(inode)->i_extra_isize = new_extra_isize;
  2450. cleanup:
  2451. if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) {
  2452. ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.",
  2453. inode->i_ino);
  2454. mnt_count = le16_to_cpu(sbi->s_es->s_mnt_count);
  2455. }
  2456. return error;
  2457. }
  2458. #define EIA_INCR 16 /* must be 2^n */
  2459. #define EIA_MASK (EIA_INCR - 1)
  2460. /* Add the large xattr @inode into @ea_inode_array for deferred iput().
  2461. * If @ea_inode_array is new or full it will be grown and the old
  2462. * contents copied over.
  2463. */
  2464. static int
  2465. ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
  2466. struct inode *inode)
  2467. {
  2468. if (*ea_inode_array == NULL) {
  2469. /*
  2470. * Start with 15 inodes, so it fits into a power-of-two size.
  2471. * If *ea_inode_array is NULL, this is essentially offsetof()
  2472. */
  2473. (*ea_inode_array) =
  2474. kmalloc(offsetof(struct ext4_xattr_inode_array,
  2475. inodes[EIA_MASK]),
  2476. GFP_NOFS);
  2477. if (*ea_inode_array == NULL)
  2478. return -ENOMEM;
  2479. (*ea_inode_array)->count = 0;
  2480. } else if (((*ea_inode_array)->count & EIA_MASK) == EIA_MASK) {
  2481. /* expand the array once all 15 + n * 16 slots are full */
  2482. struct ext4_xattr_inode_array *new_array = NULL;
  2483. int count = (*ea_inode_array)->count;
  2484. /* if new_array is NULL, this is essentially offsetof() */
  2485. new_array = kmalloc(
  2486. offsetof(struct ext4_xattr_inode_array,
  2487. inodes[count + EIA_INCR]),
  2488. GFP_NOFS);
  2489. if (new_array == NULL)
  2490. return -ENOMEM;
  2491. memcpy(new_array, *ea_inode_array,
  2492. offsetof(struct ext4_xattr_inode_array, inodes[count]));
  2493. kfree(*ea_inode_array);
  2494. *ea_inode_array = new_array;
  2495. }
  2496. (*ea_inode_array)->inodes[(*ea_inode_array)->count++] = inode;
  2497. return 0;
  2498. }
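/*
 * Growth pattern example: the initial allocation holds EIA_MASK == 15 inode
 * pointers (plus the count field), and once count reaches 15, 31, 47, ...
 * the array is reallocated with room for EIA_INCR == 16 more pointers and
 * the old contents are copied over.
 */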
  2499. /*
  2500. * ext4_xattr_delete_inode()
  2501. *
  2502. * Free extended attribute resources associated with this inode. Traverse
  2503. * all entries and decrement reference on any xattr inodes associated with this
  2504. * inode. This is called immediately before an inode is freed. We have exclusive
  2505. * access to the inode. If an orphan inode is deleted it will also release its
  2506. * references on xattr block and xattr inodes.
  2507. */
  2508. int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
  2509. struct ext4_xattr_inode_array **ea_inode_array,
  2510. int extra_credits)
  2511. {
  2512. struct buffer_head *bh = NULL;
  2513. struct ext4_xattr_ibody_header *header;
  2514. struct ext4_iloc iloc = { .bh = NULL };
  2515. struct ext4_xattr_entry *entry;
  2516. struct inode *ea_inode;
  2517. int error;
  2518. error = ext4_xattr_ensure_credits(handle, inode, extra_credits,
  2519. NULL /* bh */,
  2520. false /* dirty */,
  2521. false /* block_csum */);
  2522. if (error) {
  2523. EXT4_ERROR_INODE(inode, "ensure credits (error %d)", error);
  2524. goto cleanup;
  2525. }
  2526. if (ext4_has_feature_ea_inode(inode->i_sb) &&
  2527. ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
  2528. error = ext4_get_inode_loc(inode, &iloc);
  2529. if (error) {
  2530. EXT4_ERROR_INODE(inode, "inode loc (error %d)", error);
  2531. goto cleanup;
  2532. }
  2533. error = ext4_journal_get_write_access(handle, iloc.bh);
  2534. if (error) {
  2535. EXT4_ERROR_INODE(inode, "write access (error %d)",
  2536. error);
  2537. goto cleanup;
  2538. }
  2539. header = IHDR(inode, ext4_raw_inode(&iloc));
  2540. if (header->h_magic == cpu_to_le32(EXT4_XATTR_MAGIC))
  2541. ext4_xattr_inode_dec_ref_all(handle, inode, iloc.bh,
  2542. IFIRST(header),
  2543. false /* block_csum */,
  2544. ea_inode_array,
  2545. extra_credits,
  2546. false /* skip_quota */);
  2547. }
  2548. if (EXT4_I(inode)->i_file_acl) {
  2549. bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
  2550. if (!bh) {
  2551. EXT4_ERROR_INODE(inode, "block %llu read error",
  2552. EXT4_I(inode)->i_file_acl);
  2553. error = -EIO;
  2554. goto cleanup;
  2555. }
  2556. error = ext4_xattr_check_block(inode, bh);
  2557. if (error) {
  2558. EXT4_ERROR_INODE(inode, "bad block %llu (error %d)",
  2559. EXT4_I(inode)->i_file_acl, error);
  2560. goto cleanup;
  2561. }
  2562. if (ext4_has_feature_ea_inode(inode->i_sb)) {
  2563. for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
  2564. entry = EXT4_XATTR_NEXT(entry)) {
  2565. if (!entry->e_value_inum)
  2566. continue;
  2567. error = ext4_xattr_inode_iget(inode,
  2568. le32_to_cpu(entry->e_value_inum),
  2569. le32_to_cpu(entry->e_hash),
  2570. &ea_inode);
  2571. if (error)
  2572. continue;
  2573. ext4_xattr_inode_free_quota(inode, ea_inode,
  2574. le32_to_cpu(entry->e_value_size));
  2575. iput(ea_inode);
  2576. }
  2577. }
  2578. ext4_xattr_release_block(handle, inode, bh, ea_inode_array,
  2579. extra_credits);
  2580. /*
  2581. * Update i_file_acl value in the same transaction that releases
  2582. * block.
  2583. */
  2584. EXT4_I(inode)->i_file_acl = 0;
  2585. error = ext4_mark_inode_dirty(handle, inode);
  2586. if (error) {
  2587. EXT4_ERROR_INODE(inode, "mark inode dirty (error %d)",
  2588. error);
  2589. goto cleanup;
  2590. }
  2591. }
  2592. error = 0;
  2593. cleanup:
  2594. brelse(iloc.bh);
  2595. brelse(bh);
  2596. return error;
  2597. }
  2598. void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *ea_inode_array)
  2599. {
  2600. int idx;
  2601. if (ea_inode_array == NULL)
  2602. return;
  2603. for (idx = 0; idx < ea_inode_array->count; ++idx)
  2604. iput(ea_inode_array->inodes[idx]);
  2605. kfree(ea_inode_array);
  2606. }
  2607. /*
  2608. * ext4_xattr_block_cache_insert()
  2609. *
  2610. * Create a new entry in the extended attribute block cache, and insert
  2611. * it unless such an entry is already in the cache.
  2612. *
2613. * The insert is best-effort; -EBUSY (entry already cached) and other errors are ignored.
  2614. */
  2615. static void
  2616. ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache,
  2617. struct buffer_head *bh)
  2618. {
  2619. struct ext4_xattr_header *header = BHDR(bh);
  2620. __u32 hash = le32_to_cpu(header->h_hash);
  2621. int reusable = le32_to_cpu(header->h_refcount) <
  2622. EXT4_XATTR_REFCOUNT_MAX;
  2623. int error;
  2624. if (!ea_block_cache)
  2625. return;
  2626. error = mb_cache_entry_create(ea_block_cache, GFP_NOFS, hash,
  2627. bh->b_blocknr, reusable);
  2628. if (error) {
  2629. if (error == -EBUSY)
  2630. ea_bdebug(bh, "already in cache");
  2631. } else
  2632. ea_bdebug(bh, "inserting [%x]", (int)hash);
  2633. }
  2634. /*
  2635. * ext4_xattr_cmp()
  2636. *
  2637. * Compare two extended attribute blocks for equality.
  2638. *
2639. * Returns 0 if the blocks are equal and 1 if they differ; as written,
2640. * this helper never returns a negative error number.
  2641. */
  2642. static int
  2643. ext4_xattr_cmp(struct ext4_xattr_header *header1,
  2644. struct ext4_xattr_header *header2)
  2645. {
  2646. struct ext4_xattr_entry *entry1, *entry2;
  2647. entry1 = ENTRY(header1+1);
  2648. entry2 = ENTRY(header2+1);
  2649. while (!IS_LAST_ENTRY(entry1)) {
  2650. if (IS_LAST_ENTRY(entry2))
  2651. return 1;
  2652. if (entry1->e_hash != entry2->e_hash ||
  2653. entry1->e_name_index != entry2->e_name_index ||
  2654. entry1->e_name_len != entry2->e_name_len ||
  2655. entry1->e_value_size != entry2->e_value_size ||
  2656. entry1->e_value_inum != entry2->e_value_inum ||
  2657. memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
  2658. return 1;
  2659. if (!entry1->e_value_inum &&
  2660. memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
  2661. (char *)header2 + le16_to_cpu(entry2->e_value_offs),
  2662. le32_to_cpu(entry1->e_value_size)))
  2663. return 1;
  2664. entry1 = EXT4_XATTR_NEXT(entry1);
  2665. entry2 = EXT4_XATTR_NEXT(entry2);
  2666. }
  2667. if (!IS_LAST_ENTRY(entry2))
  2668. return 1;
  2669. return 0;
  2670. }
  2671. /*
  2672. * ext4_xattr_block_cache_find()
  2673. *
  2674. * Find an identical extended attribute block.
  2675. *
  2676. * Returns a pointer to the block found, or NULL if such a block was
  2677. * not found or an error occurred.
  2678. */
  2679. static struct buffer_head *
  2680. ext4_xattr_block_cache_find(struct inode *inode,
  2681. struct ext4_xattr_header *header,
  2682. struct mb_cache_entry **pce)
  2683. {
  2684. __u32 hash = le32_to_cpu(header->h_hash);
  2685. struct mb_cache_entry *ce;
  2686. struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
  2687. if (!ea_block_cache)
  2688. return NULL;
  2689. if (!header->h_hash)
  2690. return NULL; /* never share */
  2691. ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
  2692. ce = mb_cache_entry_find_first(ea_block_cache, hash);
  2693. while (ce) {
  2694. struct buffer_head *bh;
  2695. bh = sb_bread(inode->i_sb, ce->e_value);
  2696. if (!bh) {
  2697. EXT4_ERROR_INODE(inode, "block %lu read error",
  2698. (unsigned long)ce->e_value);
  2699. } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
  2700. *pce = ce;
  2701. return bh;
  2702. }
  2703. brelse(bh);
  2704. ce = mb_cache_entry_find_next(ea_block_cache, ce);
  2705. }
  2706. return NULL;
  2707. }
  2708. #define NAME_HASH_SHIFT 5
  2709. #define VALUE_HASH_SHIFT 16
  2710. /*
  2711. * ext4_xattr_hash_entry()
  2712. *
  2713. * Compute the hash of an extended attribute.
  2714. */
  2715. static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
  2716. size_t value_count)
  2717. {
  2718. __u32 hash = 0;
  2719. while (name_len--) {
  2720. hash = (hash << NAME_HASH_SHIFT) ^
  2721. (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
  2722. *name++;
  2723. }
  2724. while (value_count--) {
  2725. hash = (hash << VALUE_HASH_SHIFT) ^
  2726. (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
  2727. le32_to_cpu(*value++);
  2728. }
  2729. return cpu_to_le32(hash);
  2730. }
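/*
 * Tiny worked example: for the one-byte name "a" the name loop yields
 * hash == 0x61; folding in a single value word v then gives
 * (0x61 << 16) ^ (0x61 >> 16) ^ le32_to_cpu(v) == 0x610000 ^ le32_to_cpu(v),
 * since the right shift of such a small hash contributes nothing.
 */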
  2731. #undef NAME_HASH_SHIFT
  2732. #undef VALUE_HASH_SHIFT
  2733. #define BLOCK_HASH_SHIFT 16
  2734. /*
  2735. * ext4_xattr_rehash()
  2736. *
  2737. * Re-compute the extended attribute hash value after an entry has changed.
  2738. */
  2739. static void ext4_xattr_rehash(struct ext4_xattr_header *header)
  2740. {
  2741. struct ext4_xattr_entry *here;
  2742. __u32 hash = 0;
  2743. here = ENTRY(header+1);
  2744. while (!IS_LAST_ENTRY(here)) {
  2745. if (!here->e_hash) {
  2746. /* Block is not shared if an entry's hash value == 0 */
  2747. hash = 0;
  2748. break;
  2749. }
  2750. hash = (hash << BLOCK_HASH_SHIFT) ^
  2751. (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
  2752. le32_to_cpu(here->e_hash);
  2753. here = EXT4_XATTR_NEXT(here);
  2754. }
  2755. header->h_hash = cpu_to_le32(hash);
  2756. }
  2757. #undef BLOCK_HASH_SHIFT
  2758. #define HASH_BUCKET_BITS 10
  2759. struct mb_cache *
  2760. ext4_xattr_create_cache(void)
  2761. {
  2762. return mb_cache_create(HASH_BUCKET_BITS);
  2763. }
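/* With HASH_BUCKET_BITS == 10 the mbcache is created with 2^10 = 1024 hash buckets. */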
  2764. void ext4_xattr_destroy_cache(struct mb_cache *cache)
  2765. {
  2766. if (cache)
  2767. mb_cache_destroy(cache);
  2768. }