xattr.c 49 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
66116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820
  1. /*
  2. * linux/fs/ext4/xattr.c
  3. *
  4. * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
  5. *
  6. * Fix by Harrison Xing <harrison@mountainviewdata.com>.
  7. * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
  8. * Extended attributes for symlinks and special files added per
  9. * suggestion of Luka Renko <luka.renko@hermes.si>.
  10. * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
  11. * Red Hat Inc.
  12. * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
  13. * and Andreas Gruenbacher <agruen@suse.de>.
  14. */
  15. /*
  16. * Extended attributes are stored directly in inodes (on file systems with
  17. * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
  18. * field contains the block number if an inode uses an additional block. All
  19. * attributes must fit in the inode and one additional block. Blocks that
  20. * contain the identical set of attributes may be shared among several inodes.
  21. * Identical blocks are detected by keeping a cache of blocks that have
  22. * recently been accessed.
  23. *
  24. * The attributes in inodes and on blocks have a different header; the entries
  25. * are stored in the same format:
  26. *
  27. * +------------------+
  28. * | header |
  29. * | entry 1 | |
  30. * | entry 2 | | growing downwards
  31. * | entry 3 | v
  32. * | four null bytes |
  33. * | . . . |
  34. * | value 1 | ^
  35. * | value 3 | | growing upwards
  36. * | value 2 | |
  37. * +------------------+
  38. *
  39. * The header is followed by multiple entry descriptors. In disk blocks, the
  40. * entry descriptors are kept sorted. In inodes, they are unsorted. The
  41. * attribute values are aligned to the end of the block in no specific order.
  42. *
  43. * Locking strategy
  44. * ----------------
  45. * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
  46. * EA blocks are only changed if they are exclusive to an inode, so
  47. * holding xattr_sem also means that nothing but the EA block's reference
  48. * count can change. Multiple writers to the same block are synchronized
  49. * by the buffer lock.
  50. */
  51. #include <linux/init.h>
  52. #include <linux/fs.h>
  53. #include <linux/slab.h>
  54. #include <linux/mbcache.h>
  55. #include <linux/quotaops.h>
  56. #include "ext4_jbd2.h"
  57. #include "ext4.h"
  58. #include "xattr.h"
  59. #include "acl.h"
  60. #ifdef EXT4_XATTR_DEBUG
  61. # define ea_idebug(inode, fmt, ...) \
  62. printk(KERN_DEBUG "inode %s:%lu: " fmt "\n", \
  63. inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__)
  64. # define ea_bdebug(bh, fmt, ...) \
  65. printk(KERN_DEBUG "block %pg:%lu: " fmt "\n", \
  66. bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
  67. #else
  68. # define ea_idebug(inode, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
  69. # define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
  70. #endif
  71. static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
  72. static struct buffer_head *ext4_xattr_cache_find(struct inode *,
  73. struct ext4_xattr_header *,
  74. struct mb_cache_entry **);
  75. static void ext4_xattr_rehash(struct ext4_xattr_header *,
  76. struct ext4_xattr_entry *);
  77. static int ext4_xattr_list(struct dentry *dentry, char *buffer,
  78. size_t buffer_size);
  79. static const struct xattr_handler *ext4_xattr_handler_map[] = {
  80. [EXT4_XATTR_INDEX_USER] = &ext4_xattr_user_handler,
  81. #ifdef CONFIG_EXT4_FS_POSIX_ACL
  82. [EXT4_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
  83. [EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
  84. #endif
  85. [EXT4_XATTR_INDEX_TRUSTED] = &ext4_xattr_trusted_handler,
  86. #ifdef CONFIG_EXT4_FS_SECURITY
  87. [EXT4_XATTR_INDEX_SECURITY] = &ext4_xattr_security_handler,
  88. #endif
  89. };
  90. const struct xattr_handler *ext4_xattr_handlers[] = {
  91. &ext4_xattr_user_handler,
  92. &ext4_xattr_trusted_handler,
  93. #ifdef CONFIG_EXT4_FS_POSIX_ACL
  94. &posix_acl_access_xattr_handler,
  95. &posix_acl_default_xattr_handler,
  96. #endif
  97. #ifdef CONFIG_EXT4_FS_SECURITY
  98. &ext4_xattr_security_handler,
  99. #endif
  100. NULL
  101. };
  102. #define EXT4_GET_MB_CACHE(inode) (((struct ext4_sb_info *) \
  103. inode->i_sb->s_fs_info)->s_mb_cache)
  104. static __le32 ext4_xattr_block_csum(struct inode *inode,
  105. sector_t block_nr,
  106. struct ext4_xattr_header *hdr)
  107. {
  108. struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  109. __u32 csum;
  110. __le64 dsk_block_nr = cpu_to_le64(block_nr);
  111. __u32 dummy_csum = 0;
  112. int offset = offsetof(struct ext4_xattr_header, h_checksum);
  113. csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
  114. sizeof(dsk_block_nr));
  115. csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
  116. csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
  117. offset += sizeof(dummy_csum);
  118. csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
  119. EXT4_BLOCK_SIZE(inode->i_sb) - offset);
  120. return cpu_to_le32(csum);
  121. }
  122. static int ext4_xattr_block_csum_verify(struct inode *inode,
  123. sector_t block_nr,
  124. struct ext4_xattr_header *hdr)
  125. {
  126. if (ext4_has_metadata_csum(inode->i_sb) &&
  127. (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
  128. return 0;
  129. return 1;
  130. }
  131. static void ext4_xattr_block_csum_set(struct inode *inode,
  132. sector_t block_nr,
  133. struct ext4_xattr_header *hdr)
  134. {
  135. if (!ext4_has_metadata_csum(inode->i_sb))
  136. return;
  137. hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
  138. }
  139. static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
  140. struct inode *inode,
  141. struct buffer_head *bh)
  142. {
  143. ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
  144. return ext4_handle_dirty_metadata(handle, inode, bh);
  145. }
  146. static inline const struct xattr_handler *
  147. ext4_xattr_handler(int name_index)
  148. {
  149. const struct xattr_handler *handler = NULL;
  150. if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
  151. handler = ext4_xattr_handler_map[name_index];
  152. return handler;
  153. }
  154. /*
  155. * Inode operation listxattr()
  156. *
  157. * d_inode(dentry)->i_mutex: don't care
  158. */
  159. ssize_t
  160. ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
  161. {
  162. return ext4_xattr_list(dentry, buffer, size);
  163. }
  164. static int
  165. ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
  166. void *value_start)
  167. {
  168. struct ext4_xattr_entry *e = entry;
  169. /* Find the end of the names list */
  170. while (!IS_LAST_ENTRY(e)) {
  171. struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
  172. if ((void *)next >= end)
  173. return -EFSCORRUPTED;
  174. e = next;
  175. }
  176. /* Check the values */
  177. while (!IS_LAST_ENTRY(entry)) {
  178. if (entry->e_value_block != 0)
  179. return -EFSCORRUPTED;
  180. if (entry->e_value_size != 0) {
  181. u16 offs = le16_to_cpu(entry->e_value_offs);
  182. u32 size = le32_to_cpu(entry->e_value_size);
  183. void *value;
  184. /*
  185. * The value cannot overlap the names, and the value
  186. * with padding cannot extend beyond 'end'. Check both
  187. * the padded and unpadded sizes, since the size may
  188. * overflow to 0 when adding padding.
  189. */
  190. if (offs > end - value_start)
  191. return -EFSCORRUPTED;
  192. value = value_start + offs;
  193. if (value < (void *)e + sizeof(u32) ||
  194. size > end - value ||
  195. EXT4_XATTR_SIZE(size) > end - value)
  196. return -EFSCORRUPTED;
  197. }
  198. entry = EXT4_XATTR_NEXT(entry);
  199. }
  200. return 0;
  201. }
  202. static inline int
  203. ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
  204. {
  205. int error;
  206. if (buffer_verified(bh))
  207. return 0;
  208. if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
  209. BHDR(bh)->h_blocks != cpu_to_le32(1))
  210. return -EFSCORRUPTED;
  211. if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
  212. return -EFSBADCRC;
  213. error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
  214. bh->b_data);
  215. if (!error)
  216. set_buffer_verified(bh);
  217. return error;
  218. }
  219. static int
  220. __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
  221. void *end, const char *function, unsigned int line)
  222. {
  223. int error = -EFSCORRUPTED;
  224. if (end - (void *)header < sizeof(*header) + sizeof(u32) ||
  225. (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
  226. goto errout;
  227. error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
  228. errout:
  229. if (error)
  230. __ext4_error_inode(inode, function, line, 0,
  231. "corrupted in-inode xattr");
  232. return error;
  233. }
  234. #define xattr_check_inode(inode, header, end) \
  235. __xattr_check_inode((inode), (header), (end), __func__, __LINE__)
  236. static inline int
  237. ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
  238. {
  239. size_t value_size = le32_to_cpu(entry->e_value_size);
  240. if (entry->e_value_block != 0 || value_size > size ||
  241. le16_to_cpu(entry->e_value_offs) + value_size > size)
  242. return -EFSCORRUPTED;
  243. return 0;
  244. }
  245. static int
  246. ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
  247. const char *name, size_t size, int sorted)
  248. {
  249. struct ext4_xattr_entry *entry;
  250. size_t name_len;
  251. int cmp = 1;
  252. if (name == NULL)
  253. return -EINVAL;
  254. name_len = strlen(name);
  255. entry = *pentry;
  256. for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
  257. cmp = name_index - entry->e_name_index;
  258. if (!cmp)
  259. cmp = name_len - entry->e_name_len;
  260. if (!cmp)
  261. cmp = memcmp(name, entry->e_name, name_len);
  262. if (cmp <= 0 && (sorted || cmp == 0))
  263. break;
  264. }
  265. *pentry = entry;
  266. if (!cmp && ext4_xattr_check_entry(entry, size))
  267. return -EFSCORRUPTED;
  268. return cmp ? -ENODATA : 0;
  269. }
  270. static int
  271. ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
  272. void *buffer, size_t buffer_size)
  273. {
  274. struct buffer_head *bh = NULL;
  275. struct ext4_xattr_entry *entry;
  276. size_t size;
  277. int error;
  278. struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
  279. ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
  280. name_index, name, buffer, (long)buffer_size);
  281. error = -ENODATA;
  282. if (!EXT4_I(inode)->i_file_acl)
  283. goto cleanup;
  284. ea_idebug(inode, "reading block %llu",
  285. (unsigned long long)EXT4_I(inode)->i_file_acl);
  286. bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
  287. if (!bh)
  288. goto cleanup;
  289. ea_bdebug(bh, "b_count=%d, refcount=%d",
  290. atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
  291. if (ext4_xattr_check_block(inode, bh)) {
  292. bad_block:
  293. EXT4_ERROR_INODE(inode, "bad block %llu",
  294. EXT4_I(inode)->i_file_acl);
  295. error = -EFSCORRUPTED;
  296. goto cleanup;
  297. }
  298. ext4_xattr_cache_insert(ext4_mb_cache, bh);
  299. entry = BFIRST(bh);
  300. error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
  301. if (error == -EFSCORRUPTED)
  302. goto bad_block;
  303. if (error)
  304. goto cleanup;
  305. size = le32_to_cpu(entry->e_value_size);
  306. if (buffer) {
  307. error = -ERANGE;
  308. if (size > buffer_size)
  309. goto cleanup;
  310. memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
  311. size);
  312. }
  313. error = size;
  314. cleanup:
  315. brelse(bh);
  316. return error;
  317. }
  318. int
  319. ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
  320. void *buffer, size_t buffer_size)
  321. {
  322. struct ext4_xattr_ibody_header *header;
  323. struct ext4_xattr_entry *entry;
  324. struct ext4_inode *raw_inode;
  325. struct ext4_iloc iloc;
  326. size_t size;
  327. void *end;
  328. int error;
  329. if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
  330. return -ENODATA;
  331. error = ext4_get_inode_loc(inode, &iloc);
  332. if (error)
  333. return error;
  334. raw_inode = ext4_raw_inode(&iloc);
  335. header = IHDR(inode, raw_inode);
  336. entry = IFIRST(header);
  337. end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
  338. error = xattr_check_inode(inode, header, end);
  339. if (error)
  340. goto cleanup;
  341. error = ext4_xattr_find_entry(&entry, name_index, name,
  342. end - (void *)entry, 0);
  343. if (error)
  344. goto cleanup;
  345. size = le32_to_cpu(entry->e_value_size);
  346. if (buffer) {
  347. error = -ERANGE;
  348. if (size > buffer_size)
  349. goto cleanup;
  350. memcpy(buffer, (void *)IFIRST(header) +
  351. le16_to_cpu(entry->e_value_offs), size);
  352. }
  353. error = size;
  354. cleanup:
  355. brelse(iloc.bh);
  356. return error;
  357. }
  358. /*
  359. * ext4_xattr_get()
  360. *
  361. * Copy an extended attribute into the buffer
  362. * provided, or compute the buffer size required.
  363. * Buffer is NULL to compute the size of the buffer required.
  364. *
  365. * Returns a negative error number on failure, or the number of bytes
  366. * used / required on success.
  367. */
  368. int
  369. ext4_xattr_get(struct inode *inode, int name_index, const char *name,
  370. void *buffer, size_t buffer_size)
  371. {
  372. int error;
  373. if (strlen(name) > 255)
  374. return -ERANGE;
  375. down_read(&EXT4_I(inode)->xattr_sem);
  376. error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
  377. buffer_size);
  378. if (error == -ENODATA)
  379. error = ext4_xattr_block_get(inode, name_index, name, buffer,
  380. buffer_size);
  381. up_read(&EXT4_I(inode)->xattr_sem);
  382. return error;
  383. }
  384. static int
  385. ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
  386. char *buffer, size_t buffer_size)
  387. {
  388. size_t rest = buffer_size;
  389. for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
  390. const struct xattr_handler *handler =
  391. ext4_xattr_handler(entry->e_name_index);
  392. if (handler && (!handler->list || handler->list(dentry))) {
  393. const char *prefix = handler->prefix ?: handler->name;
  394. size_t prefix_len = strlen(prefix);
  395. size_t size = prefix_len + entry->e_name_len + 1;
  396. if (buffer) {
  397. if (size > rest)
  398. return -ERANGE;
  399. memcpy(buffer, prefix, prefix_len);
  400. buffer += prefix_len;
  401. memcpy(buffer, entry->e_name, entry->e_name_len);
  402. buffer += entry->e_name_len;
  403. *buffer++ = 0;
  404. }
  405. rest -= size;
  406. }
  407. }
  408. return buffer_size - rest; /* total size */
  409. }
  410. static int
  411. ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
  412. {
  413. struct inode *inode = d_inode(dentry);
  414. struct buffer_head *bh = NULL;
  415. int error;
  416. struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
  417. ea_idebug(inode, "buffer=%p, buffer_size=%ld",
  418. buffer, (long)buffer_size);
  419. error = 0;
  420. if (!EXT4_I(inode)->i_file_acl)
  421. goto cleanup;
  422. ea_idebug(inode, "reading block %llu",
  423. (unsigned long long)EXT4_I(inode)->i_file_acl);
  424. bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
  425. error = -EIO;
  426. if (!bh)
  427. goto cleanup;
  428. ea_bdebug(bh, "b_count=%d, refcount=%d",
  429. atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
  430. if (ext4_xattr_check_block(inode, bh)) {
  431. EXT4_ERROR_INODE(inode, "bad block %llu",
  432. EXT4_I(inode)->i_file_acl);
  433. error = -EFSCORRUPTED;
  434. goto cleanup;
  435. }
  436. ext4_xattr_cache_insert(ext4_mb_cache, bh);
  437. error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
  438. cleanup:
  439. brelse(bh);
  440. return error;
  441. }
  442. static int
  443. ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
  444. {
  445. struct inode *inode = d_inode(dentry);
  446. struct ext4_xattr_ibody_header *header;
  447. struct ext4_inode *raw_inode;
  448. struct ext4_iloc iloc;
  449. void *end;
  450. int error;
  451. if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
  452. return 0;
  453. error = ext4_get_inode_loc(inode, &iloc);
  454. if (error)
  455. return error;
  456. raw_inode = ext4_raw_inode(&iloc);
  457. header = IHDR(inode, raw_inode);
  458. end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
  459. error = xattr_check_inode(inode, header, end);
  460. if (error)
  461. goto cleanup;
  462. error = ext4_xattr_list_entries(dentry, IFIRST(header),
  463. buffer, buffer_size);
  464. cleanup:
  465. brelse(iloc.bh);
  466. return error;
  467. }
  468. /*
  469. * ext4_xattr_list()
  470. *
  471. * Copy a list of attribute names into the buffer
  472. * provided, or compute the buffer size required.
  473. * Buffer is NULL to compute the size of the buffer required.
  474. *
  475. * Returns a negative error number on failure, or the number of bytes
  476. * used / required on success.
  477. */
  478. static int
  479. ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
  480. {
  481. int ret, ret2;
  482. down_read(&EXT4_I(d_inode(dentry))->xattr_sem);
  483. ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
  484. if (ret < 0)
  485. goto errout;
  486. if (buffer) {
  487. buffer += ret;
  488. buffer_size -= ret;
  489. }
  490. ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
  491. if (ret < 0)
  492. goto errout;
  493. ret += ret2;
  494. errout:
  495. up_read(&EXT4_I(d_inode(dentry))->xattr_sem);
  496. return ret;
  497. }
  498. /*
  499. * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
  500. * not set, set it.
  501. */
  502. static void ext4_xattr_update_super_block(handle_t *handle,
  503. struct super_block *sb)
  504. {
  505. if (ext4_has_feature_xattr(sb))
  506. return;
  507. BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
  508. if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
  509. ext4_set_feature_xattr(sb);
  510. ext4_handle_dirty_super(handle, sb);
  511. }
  512. }
  513. /*
  514. * Release the xattr block BH: If the reference count is > 1, decrement it;
  515. * otherwise free the block.
  516. */
  517. static void
  518. ext4_xattr_release_block(handle_t *handle, struct inode *inode,
  519. struct buffer_head *bh)
  520. {
  521. struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
  522. u32 hash, ref;
  523. int error = 0;
  524. BUFFER_TRACE(bh, "get_write_access");
  525. error = ext4_journal_get_write_access(handle, bh);
  526. if (error)
  527. goto out;
  528. lock_buffer(bh);
  529. hash = le32_to_cpu(BHDR(bh)->h_hash);
  530. ref = le32_to_cpu(BHDR(bh)->h_refcount);
  531. if (ref == 1) {
  532. ea_bdebug(bh, "refcount now=0; freeing");
  533. /*
  534. * This must happen under buffer lock for
  535. * ext4_xattr_block_set() to reliably detect freed block
  536. */
  537. mb_cache_entry_delete_block(ext4_mb_cache, hash, bh->b_blocknr);
  538. get_bh(bh);
  539. unlock_buffer(bh);
  540. ext4_free_blocks(handle, inode, bh, 0, 1,
  541. EXT4_FREE_BLOCKS_METADATA |
  542. EXT4_FREE_BLOCKS_FORGET);
  543. } else {
  544. ref--;
  545. BHDR(bh)->h_refcount = cpu_to_le32(ref);
  546. if (ref == EXT4_XATTR_REFCOUNT_MAX - 1) {
  547. struct mb_cache_entry *ce;
  548. ce = mb_cache_entry_get(ext4_mb_cache, hash,
  549. bh->b_blocknr);
  550. if (ce) {
  551. ce->e_reusable = 1;
  552. mb_cache_entry_put(ext4_mb_cache, ce);
  553. }
  554. }
  555. /*
  556. * Beware of this ugliness: Releasing of xattr block references
  557. * from different inodes can race and so we have to protect
  558. * from a race where someone else frees the block (and releases
  559. * its journal_head) before we are done dirtying the buffer. In
  560. * nojournal mode this race is harmless and we actually cannot
  561. * call ext4_handle_dirty_xattr_block() with locked buffer as
  562. * that function can call sync_dirty_buffer() so for that case
  563. * we handle the dirtying after unlocking the buffer.
  564. */
  565. if (ext4_handle_valid(handle))
  566. error = ext4_handle_dirty_xattr_block(handle, inode,
  567. bh);
  568. unlock_buffer(bh);
  569. if (!ext4_handle_valid(handle))
  570. error = ext4_handle_dirty_xattr_block(handle, inode,
  571. bh);
  572. if (IS_SYNC(inode))
  573. ext4_handle_sync(handle);
  574. dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
  575. ea_bdebug(bh, "refcount now=%d; releasing",
  576. le32_to_cpu(BHDR(bh)->h_refcount));
  577. }
  578. out:
  579. ext4_std_error(inode->i_sb, error);
  580. return;
  581. }
  582. /*
  583. * Find the available free space for EAs. This also returns the total number of
  584. * bytes used by EA entries.
  585. */
  586. static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
  587. size_t *min_offs, void *base, int *total)
  588. {
  589. for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
  590. if (last->e_value_size) {
  591. size_t offs = le16_to_cpu(last->e_value_offs);
  592. if (offs < *min_offs)
  593. *min_offs = offs;
  594. }
  595. if (total)
  596. *total += EXT4_XATTR_LEN(last->e_name_len);
  597. }
  598. return (*min_offs - ((void *)last - base) - sizeof(__u32));
  599. }
  600. static int
  601. ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
  602. {
  603. struct ext4_xattr_entry *last;
  604. size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
  605. /* Compute min_offs and last. */
  606. last = s->first;
  607. for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
  608. if (last->e_value_size) {
  609. size_t offs = le16_to_cpu(last->e_value_offs);
  610. if (offs < min_offs)
  611. min_offs = offs;
  612. }
  613. }
  614. free = min_offs - ((void *)last - s->base) - sizeof(__u32);
  615. if (!s->not_found) {
  616. if (s->here->e_value_size) {
  617. size_t size = le32_to_cpu(s->here->e_value_size);
  618. free += EXT4_XATTR_SIZE(size);
  619. }
  620. free += EXT4_XATTR_LEN(name_len);
  621. }
  622. if (i->value) {
  623. if (free < EXT4_XATTR_LEN(name_len) +
  624. EXT4_XATTR_SIZE(i->value_len))
  625. return -ENOSPC;
  626. }
  627. if (i->value && s->not_found) {
  628. /* Insert the new name. */
  629. size_t size = EXT4_XATTR_LEN(name_len);
  630. size_t rest = (void *)last - (void *)s->here + sizeof(__u32);
  631. memmove((void *)s->here + size, s->here, rest);
  632. memset(s->here, 0, size);
  633. s->here->e_name_index = i->name_index;
  634. s->here->e_name_len = name_len;
  635. memcpy(s->here->e_name, i->name, name_len);
  636. } else {
  637. if (s->here->e_value_size) {
  638. void *first_val = s->base + min_offs;
  639. size_t offs = le16_to_cpu(s->here->e_value_offs);
  640. void *val = s->base + offs;
  641. size_t size = EXT4_XATTR_SIZE(
  642. le32_to_cpu(s->here->e_value_size));
  643. if (i->value && size == EXT4_XATTR_SIZE(i->value_len)) {
  644. /* The old and the new value have the same
  645. size. Just replace. */
  646. s->here->e_value_size =
  647. cpu_to_le32(i->value_len);
  648. if (i->value == EXT4_ZERO_XATTR_VALUE) {
  649. memset(val, 0, size);
  650. } else {
  651. /* Clear pad bytes first. */
  652. memset(val + size - EXT4_XATTR_PAD, 0,
  653. EXT4_XATTR_PAD);
  654. memcpy(val, i->value, i->value_len);
  655. }
  656. return 0;
  657. }
  658. /* Remove the old value. */
  659. memmove(first_val + size, first_val, val - first_val);
  660. memset(first_val, 0, size);
  661. s->here->e_value_size = 0;
  662. s->here->e_value_offs = 0;
  663. min_offs += size;
  664. /* Adjust all value offsets. */
  665. last = s->first;
  666. while (!IS_LAST_ENTRY(last)) {
  667. size_t o = le16_to_cpu(last->e_value_offs);
  668. if (last->e_value_size && o < offs)
  669. last->e_value_offs =
  670. cpu_to_le16(o + size);
  671. last = EXT4_XATTR_NEXT(last);
  672. }
  673. }
  674. if (!i->value) {
  675. /* Remove the old name. */
  676. size_t size = EXT4_XATTR_LEN(name_len);
  677. last = ENTRY((void *)last - size);
  678. memmove(s->here, (void *)s->here + size,
  679. (void *)last - (void *)s->here + sizeof(__u32));
  680. memset(last, 0, size);
  681. }
  682. }
  683. if (i->value) {
  684. /* Insert the new value. */
  685. s->here->e_value_size = cpu_to_le32(i->value_len);
  686. if (i->value_len) {
  687. size_t size = EXT4_XATTR_SIZE(i->value_len);
  688. void *val = s->base + min_offs - size;
  689. s->here->e_value_offs = cpu_to_le16(min_offs - size);
  690. if (i->value == EXT4_ZERO_XATTR_VALUE) {
  691. memset(val, 0, size);
  692. } else {
  693. /* Clear the pad bytes first. */
  694. memset(val + size - EXT4_XATTR_PAD, 0,
  695. EXT4_XATTR_PAD);
  696. memcpy(val, i->value, i->value_len);
  697. }
  698. }
  699. }
  700. return 0;
  701. }
  702. struct ext4_xattr_block_find {
  703. struct ext4_xattr_search s;
  704. struct buffer_head *bh;
  705. };
  706. static int
  707. ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
  708. struct ext4_xattr_block_find *bs)
  709. {
  710. struct super_block *sb = inode->i_sb;
  711. int error;
  712. ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
  713. i->name_index, i->name, i->value, (long)i->value_len);
  714. if (EXT4_I(inode)->i_file_acl) {
  715. /* The inode already has an extended attribute block. */
  716. bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
  717. error = -EIO;
  718. if (!bs->bh)
  719. goto cleanup;
  720. ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
  721. atomic_read(&(bs->bh->b_count)),
  722. le32_to_cpu(BHDR(bs->bh)->h_refcount));
  723. if (ext4_xattr_check_block(inode, bs->bh)) {
  724. EXT4_ERROR_INODE(inode, "bad block %llu",
  725. EXT4_I(inode)->i_file_acl);
  726. error = -EFSCORRUPTED;
  727. goto cleanup;
  728. }
  729. /* Find the named attribute. */
  730. bs->s.base = BHDR(bs->bh);
  731. bs->s.first = BFIRST(bs->bh);
  732. bs->s.end = bs->bh->b_data + bs->bh->b_size;
  733. bs->s.here = bs->s.first;
  734. error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
  735. i->name, bs->bh->b_size, 1);
  736. if (error && error != -ENODATA)
  737. goto cleanup;
  738. bs->s.not_found = error;
  739. }
  740. error = 0;
  741. cleanup:
  742. return error;
  743. }
  744. static int
  745. ext4_xattr_block_set(handle_t *handle, struct inode *inode,
  746. struct ext4_xattr_info *i,
  747. struct ext4_xattr_block_find *bs)
  748. {
  749. struct super_block *sb = inode->i_sb;
  750. struct buffer_head *new_bh = NULL;
  751. struct ext4_xattr_search *s = &bs->s;
  752. struct mb_cache_entry *ce = NULL;
  753. int error = 0;
  754. struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
  755. #define header(x) ((struct ext4_xattr_header *)(x))
  756. if (i->value && i->value_len > sb->s_blocksize)
  757. return -ENOSPC;
  758. if (s->base) {
  759. BUFFER_TRACE(bs->bh, "get_write_access");
  760. error = ext4_journal_get_write_access(handle, bs->bh);
  761. if (error)
  762. goto cleanup;
  763. lock_buffer(bs->bh);
  764. if (header(s->base)->h_refcount == cpu_to_le32(1)) {
  765. __u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash);
  766. /*
  767. * This must happen under buffer lock for
  768. * ext4_xattr_block_set() to reliably detect modified
  769. * block
  770. */
  771. mb_cache_entry_delete_block(ext4_mb_cache, hash,
  772. bs->bh->b_blocknr);
  773. ea_bdebug(bs->bh, "modifying in-place");
  774. error = ext4_xattr_set_entry(i, s);
  775. if (!error) {
  776. if (!IS_LAST_ENTRY(s->first))
  777. ext4_xattr_rehash(header(s->base),
  778. s->here);
  779. ext4_xattr_cache_insert(ext4_mb_cache,
  780. bs->bh);
  781. }
  782. unlock_buffer(bs->bh);
  783. if (error == -EFSCORRUPTED)
  784. goto bad_block;
  785. if (!error)
  786. error = ext4_handle_dirty_xattr_block(handle,
  787. inode,
  788. bs->bh);
  789. if (error)
  790. goto cleanup;
  791. goto inserted;
  792. } else {
  793. int offset = (char *)s->here - bs->bh->b_data;
  794. unlock_buffer(bs->bh);
  795. ea_bdebug(bs->bh, "cloning");
  796. s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
  797. error = -ENOMEM;
  798. if (s->base == NULL)
  799. goto cleanup;
  800. memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
  801. s->first = ENTRY(header(s->base)+1);
  802. header(s->base)->h_refcount = cpu_to_le32(1);
  803. s->here = ENTRY(s->base + offset);
  804. s->end = s->base + bs->bh->b_size;
  805. }
  806. } else {
  807. /* Allocate a buffer where we construct the new block. */
  808. s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
  809. /* assert(header == s->base) */
  810. error = -ENOMEM;
  811. if (s->base == NULL)
  812. goto cleanup;
  813. header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
  814. header(s->base)->h_blocks = cpu_to_le32(1);
  815. header(s->base)->h_refcount = cpu_to_le32(1);
  816. s->first = ENTRY(header(s->base)+1);
  817. s->here = ENTRY(header(s->base)+1);
  818. s->end = s->base + sb->s_blocksize;
  819. }
  820. error = ext4_xattr_set_entry(i, s);
  821. if (error == -EFSCORRUPTED)
  822. goto bad_block;
  823. if (error)
  824. goto cleanup;
  825. if (!IS_LAST_ENTRY(s->first))
  826. ext4_xattr_rehash(header(s->base), s->here);
  827. inserted:
  828. if (!IS_LAST_ENTRY(s->first)) {
  829. new_bh = ext4_xattr_cache_find(inode, header(s->base), &ce);
  830. if (new_bh) {
  831. /* We found an identical block in the cache. */
  832. if (new_bh == bs->bh)
  833. ea_bdebug(new_bh, "keeping");
  834. else {
  835. u32 ref;
  836. /* The old block is released after updating
  837. the inode. */
  838. error = dquot_alloc_block(inode,
  839. EXT4_C2B(EXT4_SB(sb), 1));
  840. if (error)
  841. goto cleanup;
  842. BUFFER_TRACE(new_bh, "get_write_access");
  843. error = ext4_journal_get_write_access(handle,
  844. new_bh);
  845. if (error)
  846. goto cleanup_dquot;
  847. lock_buffer(new_bh);
  848. /*
  849. * We have to be careful about races with
  850. * freeing, rehashing or adding references to
  851. * xattr block. Once we hold buffer lock xattr
  852. * block's state is stable so we can check
  853. * whether the block got freed / rehashed or
  854. * not. Since we unhash mbcache entry under
  855. * buffer lock when freeing / rehashing xattr
  856. * block, checking whether entry is still
  857. * hashed is reliable. Same rules hold for
  858. * e_reusable handling.
  859. */
  860. if (hlist_bl_unhashed(&ce->e_hash_list) ||
  861. !ce->e_reusable) {
  862. /*
  863. * Undo everything and check mbcache
  864. * again.
  865. */
  866. unlock_buffer(new_bh);
  867. dquot_free_block(inode,
  868. EXT4_C2B(EXT4_SB(sb),
  869. 1));
  870. brelse(new_bh);
  871. mb_cache_entry_put(ext4_mb_cache, ce);
  872. ce = NULL;
  873. new_bh = NULL;
  874. goto inserted;
  875. }
  876. ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
  877. BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
  878. if (ref >= EXT4_XATTR_REFCOUNT_MAX)
  879. ce->e_reusable = 0;
  880. ea_bdebug(new_bh, "reusing; refcount now=%d",
  881. ref);
  882. unlock_buffer(new_bh);
  883. error = ext4_handle_dirty_xattr_block(handle,
  884. inode,
  885. new_bh);
  886. if (error)
  887. goto cleanup_dquot;
  888. }
  889. mb_cache_entry_touch(ext4_mb_cache, ce);
  890. mb_cache_entry_put(ext4_mb_cache, ce);
  891. ce = NULL;
  892. } else if (bs->bh && s->base == bs->bh->b_data) {
  893. /* We were modifying this block in-place. */
  894. ea_bdebug(bs->bh, "keeping this block");
  895. new_bh = bs->bh;
  896. get_bh(new_bh);
  897. } else {
  898. /* We need to allocate a new block */
  899. ext4_fsblk_t goal, block;
  900. goal = ext4_group_first_block_no(sb,
  901. EXT4_I(inode)->i_block_group);
  902. /* non-extent files can't have physical blocks past 2^32 */
  903. if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
  904. goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
  905. block = ext4_new_meta_blocks(handle, inode, goal, 0,
  906. NULL, &error);
  907. if (error)
  908. goto cleanup;
  909. if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
  910. BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
  911. ea_idebug(inode, "creating block %llu",
  912. (unsigned long long)block);
  913. new_bh = sb_getblk(sb, block);
  914. if (unlikely(!new_bh)) {
  915. error = -ENOMEM;
  916. getblk_failed:
  917. ext4_free_blocks(handle, inode, NULL, block, 1,
  918. EXT4_FREE_BLOCKS_METADATA);
  919. goto cleanup;
  920. }
  921. lock_buffer(new_bh);
  922. error = ext4_journal_get_create_access(handle, new_bh);
  923. if (error) {
  924. unlock_buffer(new_bh);
  925. error = -EIO;
  926. goto getblk_failed;
  927. }
  928. memcpy(new_bh->b_data, s->base, new_bh->b_size);
  929. set_buffer_uptodate(new_bh);
  930. unlock_buffer(new_bh);
  931. ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
  932. error = ext4_handle_dirty_xattr_block(handle,
  933. inode, new_bh);
  934. if (error)
  935. goto cleanup;
  936. }
  937. }
  938. /* Update the inode. */
  939. EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
  940. /* Drop the previous xattr block. */
  941. if (bs->bh && bs->bh != new_bh)
  942. ext4_xattr_release_block(handle, inode, bs->bh);
  943. error = 0;
  944. cleanup:
  945. if (ce)
  946. mb_cache_entry_put(ext4_mb_cache, ce);
  947. brelse(new_bh);
  948. if (!(bs->bh && s->base == bs->bh->b_data))
  949. kfree(s->base);
  950. return error;
  951. cleanup_dquot:
  952. dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
  953. goto cleanup;
  954. bad_block:
  955. EXT4_ERROR_INODE(inode, "bad block %llu",
  956. EXT4_I(inode)->i_file_acl);
  957. goto cleanup;
  958. #undef header
  959. }
  960. int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
  961. struct ext4_xattr_ibody_find *is)
  962. {
  963. struct ext4_xattr_ibody_header *header;
  964. struct ext4_inode *raw_inode;
  965. int error;
  966. if (EXT4_I(inode)->i_extra_isize == 0)
  967. return 0;
  968. raw_inode = ext4_raw_inode(&is->iloc);
  969. header = IHDR(inode, raw_inode);
  970. is->s.base = is->s.first = IFIRST(header);
  971. is->s.here = is->s.first;
  972. is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
  973. if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
  974. error = xattr_check_inode(inode, header, is->s.end);
  975. if (error)
  976. return error;
  977. /* Find the named attribute. */
  978. error = ext4_xattr_find_entry(&is->s.here, i->name_index,
  979. i->name, is->s.end -
  980. (void *)is->s.base, 0);
  981. if (error && error != -ENODATA)
  982. return error;
  983. is->s.not_found = error;
  984. }
  985. return 0;
  986. }
  987. int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
  988. struct ext4_xattr_info *i,
  989. struct ext4_xattr_ibody_find *is)
  990. {
  991. struct ext4_xattr_ibody_header *header;
  992. struct ext4_xattr_search *s = &is->s;
  993. int error;
  994. if (EXT4_I(inode)->i_extra_isize == 0)
  995. return -ENOSPC;
  996. error = ext4_xattr_set_entry(i, s);
  997. if (error) {
  998. if (error == -ENOSPC &&
  999. ext4_has_inline_data(inode)) {
  1000. error = ext4_try_to_evict_inline_data(handle, inode,
  1001. EXT4_XATTR_LEN(strlen(i->name) +
  1002. EXT4_XATTR_SIZE(i->value_len)));
  1003. if (error)
  1004. return error;
  1005. error = ext4_xattr_ibody_find(inode, i, is);
  1006. if (error)
  1007. return error;
  1008. error = ext4_xattr_set_entry(i, s);
  1009. }
  1010. if (error)
  1011. return error;
  1012. }
  1013. header = IHDR(inode, ext4_raw_inode(&is->iloc));
  1014. if (!IS_LAST_ENTRY(s->first)) {
  1015. header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
  1016. ext4_set_inode_state(inode, EXT4_STATE_XATTR);
  1017. } else {
  1018. header->h_magic = cpu_to_le32(0);
  1019. ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
  1020. }
  1021. return 0;
  1022. }
  1023. static int ext4_xattr_ibody_set(struct inode *inode,
  1024. struct ext4_xattr_info *i,
  1025. struct ext4_xattr_ibody_find *is)
  1026. {
  1027. struct ext4_xattr_ibody_header *header;
  1028. struct ext4_xattr_search *s = &is->s;
  1029. int error;
  1030. if (EXT4_I(inode)->i_extra_isize == 0)
  1031. return -ENOSPC;
  1032. error = ext4_xattr_set_entry(i, s);
  1033. if (error)
  1034. return error;
  1035. header = IHDR(inode, ext4_raw_inode(&is->iloc));
  1036. if (!IS_LAST_ENTRY(s->first)) {
  1037. header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
  1038. ext4_set_inode_state(inode, EXT4_STATE_XATTR);
  1039. } else {
  1040. header->h_magic = cpu_to_le32(0);
  1041. ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
  1042. }
  1043. return 0;
  1044. }
  1045. static int ext4_xattr_value_same(struct ext4_xattr_search *s,
  1046. struct ext4_xattr_info *i)
  1047. {
  1048. void *value;
  1049. if (le32_to_cpu(s->here->e_value_size) != i->value_len)
  1050. return 0;
  1051. value = ((void *)s->base) + le16_to_cpu(s->here->e_value_offs);
  1052. return !memcmp(value, i->value, i->value_len);
  1053. }
  1054. /*
  1055. * ext4_xattr_set_handle()
  1056. *
  1057. * Create, replace or remove an extended attribute for this inode. Value
  1058. * is NULL to remove an existing extended attribute, and non-NULL to
  1059. * either replace an existing extended attribute, or create a new extended
  1060. * attribute. The flags XATTR_REPLACE and XATTR_CREATE
  1061. * specify that an extended attribute must exist and must not exist
  1062. * previous to the call, respectively.
  1063. *
  1064. * Returns 0, or a negative error number on failure.
  1065. */
  1066. int
  1067. ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
  1068. const char *name, const void *value, size_t value_len,
  1069. int flags)
  1070. {
  1071. struct ext4_xattr_info i = {
  1072. .name_index = name_index,
  1073. .name = name,
  1074. .value = value,
  1075. .value_len = value_len,
  1076. };
  1077. struct ext4_xattr_ibody_find is = {
  1078. .s = { .not_found = -ENODATA, },
  1079. };
  1080. struct ext4_xattr_block_find bs = {
  1081. .s = { .not_found = -ENODATA, },
  1082. };
  1083. unsigned long no_expand;
  1084. int error;
  1085. if (!name)
  1086. return -EINVAL;
  1087. if (strlen(name) > 255)
  1088. return -ERANGE;
  1089. down_write(&EXT4_I(inode)->xattr_sem);
  1090. no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
  1091. ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
  1092. error = ext4_reserve_inode_write(handle, inode, &is.iloc);
  1093. if (error)
  1094. goto cleanup;
  1095. if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
  1096. struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
  1097. memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
  1098. ext4_clear_inode_state(inode, EXT4_STATE_NEW);
  1099. }
  1100. error = ext4_xattr_ibody_find(inode, &i, &is);
  1101. if (error)
  1102. goto cleanup;
  1103. if (is.s.not_found)
  1104. error = ext4_xattr_block_find(inode, &i, &bs);
  1105. if (error)
  1106. goto cleanup;
  1107. if (is.s.not_found && bs.s.not_found) {
  1108. error = -ENODATA;
  1109. if (flags & XATTR_REPLACE)
  1110. goto cleanup;
  1111. error = 0;
  1112. if (!value)
  1113. goto cleanup;
  1114. } else {
  1115. error = -EEXIST;
  1116. if (flags & XATTR_CREATE)
  1117. goto cleanup;
  1118. }
  1119. if (!value) {
  1120. if (!is.s.not_found)
  1121. error = ext4_xattr_ibody_set(inode, &i, &is);
  1122. else if (!bs.s.not_found)
  1123. error = ext4_xattr_block_set(handle, inode, &i, &bs);
  1124. } else {
  1125. error = 0;
  1126. /* Xattr value did not change? Save us some work and bail out */
  1127. if (!is.s.not_found && ext4_xattr_value_same(&is.s, &i))
  1128. goto cleanup;
  1129. if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i))
  1130. goto cleanup;
  1131. error = ext4_xattr_ibody_set(inode, &i, &is);
  1132. if (!error && !bs.s.not_found) {
  1133. i.value = NULL;
  1134. error = ext4_xattr_block_set(handle, inode, &i, &bs);
  1135. } else if (error == -ENOSPC) {
  1136. if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
  1137. error = ext4_xattr_block_find(inode, &i, &bs);
  1138. if (error)
  1139. goto cleanup;
  1140. }
  1141. error = ext4_xattr_block_set(handle, inode, &i, &bs);
  1142. if (error)
  1143. goto cleanup;
  1144. if (!is.s.not_found) {
  1145. i.value = NULL;
  1146. error = ext4_xattr_ibody_set(inode, &i, &is);
  1147. }
  1148. }
  1149. }
  1150. if (!error) {
  1151. ext4_xattr_update_super_block(handle, inode->i_sb);
  1152. inode->i_ctime = current_time(inode);
  1153. if (!value)
  1154. ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
  1155. error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
  1156. /*
  1157. * The bh is consumed by ext4_mark_iloc_dirty, even with
  1158. * error != 0.
  1159. */
  1160. is.iloc.bh = NULL;
  1161. if (IS_SYNC(inode))
  1162. ext4_handle_sync(handle);
  1163. }
  1164. cleanup:
  1165. brelse(is.iloc.bh);
  1166. brelse(bs.bh);
  1167. if (no_expand == 0)
  1168. ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
  1169. up_write(&EXT4_I(inode)->xattr_sem);
  1170. return error;
  1171. }
  1172. /*
  1173. * ext4_xattr_set()
  1174. *
  1175. * Like ext4_xattr_set_handle, but start from an inode. This extended
  1176. * attribute modification is a filesystem transaction by itself.
  1177. *
  1178. * Returns 0, or a negative error number on failure.
  1179. */
  1180. int
  1181. ext4_xattr_set(struct inode *inode, int name_index, const char *name,
  1182. const void *value, size_t value_len, int flags)
  1183. {
  1184. handle_t *handle;
  1185. int error, retries = 0;
  1186. int credits = ext4_jbd2_credits_xattr(inode);
  1187. retry:
  1188. handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
  1189. if (IS_ERR(handle)) {
  1190. error = PTR_ERR(handle);
  1191. } else {
  1192. int error2;
  1193. error = ext4_xattr_set_handle(handle, inode, name_index, name,
  1194. value, value_len, flags);
  1195. error2 = ext4_journal_stop(handle);
  1196. if (error == -ENOSPC &&
  1197. ext4_should_retry_alloc(inode->i_sb, &retries))
  1198. goto retry;
  1199. if (error == 0)
  1200. error = error2;
  1201. }
  1202. return error;
  1203. }
  1204. /*
  1205. * Shift the EA entries in the inode to create space for the increased
  1206. * i_extra_isize.
  1207. */
  1208. static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
  1209. int value_offs_shift, void *to,
  1210. void *from, size_t n)
  1211. {
  1212. struct ext4_xattr_entry *last = entry;
  1213. int new_offs;
  1214. /* We always shift xattr headers further thus offsets get lower */
  1215. BUG_ON(value_offs_shift > 0);
  1216. /* Adjust the value offsets of the entries */
  1217. for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
  1218. if (last->e_value_size) {
  1219. new_offs = le16_to_cpu(last->e_value_offs) +
  1220. value_offs_shift;
  1221. last->e_value_offs = cpu_to_le16(new_offs);
  1222. }
  1223. }
  1224. /* Shift the entries by n bytes */
  1225. memmove(to, from, n);
  1226. }
  1227. /*
  1228. * Move xattr pointed to by 'entry' from inode into external xattr block
  1229. */
  1230. static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
  1231. struct ext4_inode *raw_inode,
  1232. struct ext4_xattr_entry *entry)
  1233. {
  1234. struct ext4_xattr_ibody_find *is = NULL;
  1235. struct ext4_xattr_block_find *bs = NULL;
  1236. char *buffer = NULL, *b_entry_name = NULL;
  1237. size_t value_offs, value_size;
  1238. struct ext4_xattr_info i = {
  1239. .value = NULL,
  1240. .value_len = 0,
  1241. .name_index = entry->e_name_index,
  1242. };
  1243. struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
  1244. int error;
  1245. value_offs = le16_to_cpu(entry->e_value_offs);
  1246. value_size = le32_to_cpu(entry->e_value_size);
  1247. is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
  1248. bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
  1249. buffer = kmalloc(value_size, GFP_NOFS);
  1250. b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
  1251. if (!is || !bs || !buffer || !b_entry_name) {
  1252. error = -ENOMEM;
  1253. goto out;
  1254. }
  1255. is->s.not_found = -ENODATA;
  1256. bs->s.not_found = -ENODATA;
  1257. is->iloc.bh = NULL;
  1258. bs->bh = NULL;
  1259. /* Save the entry name and the entry value */
  1260. memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size);
  1261. memcpy(b_entry_name, entry->e_name, entry->e_name_len);
  1262. b_entry_name[entry->e_name_len] = '\0';
  1263. i.name = b_entry_name;
  1264. error = ext4_get_inode_loc(inode, &is->iloc);
  1265. if (error)
  1266. goto out;
  1267. error = ext4_xattr_ibody_find(inode, &i, is);
  1268. if (error)
  1269. goto out;
  1270. /* Remove the chosen entry from the inode */
  1271. error = ext4_xattr_ibody_set(inode, &i, is);
  1272. if (error)
  1273. goto out;
  1274. i.name = b_entry_name;
  1275. i.value = buffer;
  1276. i.value_len = value_size;
  1277. error = ext4_xattr_block_find(inode, &i, bs);
  1278. if (error)
  1279. goto out;
  1280. /* Add entry which was removed from the inode into the block */
  1281. error = ext4_xattr_block_set(handle, inode, &i, bs);
  1282. if (error)
  1283. goto out;
  1284. error = 0;
  1285. out:
  1286. kfree(b_entry_name);
  1287. kfree(buffer);
  1288. if (is)
  1289. brelse(is->iloc.bh);
  1290. kfree(is);
  1291. kfree(bs);
  1292. return error;
  1293. }
  1294. static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
  1295. struct ext4_inode *raw_inode,
  1296. int isize_diff, size_t ifree,
  1297. size_t bfree, int *total_ino)
  1298. {
  1299. struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
  1300. struct ext4_xattr_entry *small_entry;
  1301. struct ext4_xattr_entry *entry;
  1302. struct ext4_xattr_entry *last;
  1303. unsigned int entry_size; /* EA entry size */
  1304. unsigned int total_size; /* EA entry size + value size */
  1305. unsigned int min_total_size;
  1306. int error;
  1307. while (isize_diff > ifree) {
  1308. entry = NULL;
  1309. small_entry = NULL;
  1310. min_total_size = ~0U;
  1311. last = IFIRST(header);
  1312. /* Find the entry best suited to be pushed into EA block */
  1313. for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
  1314. total_size =
  1315. EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
  1316. EXT4_XATTR_LEN(last->e_name_len);
  1317. if (total_size <= bfree &&
  1318. total_size < min_total_size) {
  1319. if (total_size + ifree < isize_diff) {
  1320. small_entry = last;
  1321. } else {
  1322. entry = last;
  1323. min_total_size = total_size;
  1324. }
  1325. }
  1326. }
  1327. if (entry == NULL) {
  1328. if (small_entry == NULL)
  1329. return -ENOSPC;
  1330. entry = small_entry;
  1331. }
  1332. entry_size = EXT4_XATTR_LEN(entry->e_name_len);
  1333. total_size = entry_size +
  1334. EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size));
  1335. error = ext4_xattr_move_to_block(handle, inode, raw_inode,
  1336. entry);
  1337. if (error)
  1338. return error;
  1339. *total_ino -= entry_size;
  1340. ifree += total_size;
  1341. bfree -= total_size;
  1342. }
  1343. return 0;
  1344. }
/*
 * Expand an inode by new_extra_isize bytes when EAs are present.
 * Returns 0 on success or negative error number on failure.
 */
int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
			       struct ext4_inode *raw_inode, handle_t *handle)
{
	struct ext4_xattr_ibody_header *header;
	struct buffer_head *bh = NULL;
	size_t min_offs;
	size_t ifree, bfree;
	int total_ino;
	void *base, *end;
	int error = 0, tried_min_extra_isize = 0;
	int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
	int isize_diff;	/* How much do we need to grow i_extra_isize */

	down_write(&EXT4_I(inode)->xattr_sem);
	/*
	 * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
	 */
	ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
retry:
	isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		goto out;

	header = IHDR(inode, raw_inode);

	/*
	 * Check if enough free space is available in the inode to shift the
	 * entries ahead by new_extra_isize.
	 */
	base = IFIRST(header);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	min_offs = end - base;
	total_ino = sizeof(struct ext4_xattr_ibody_header);

	error = xattr_check_inode(inode, header, end);
	if (error)
		goto cleanup;

	ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
	if (ifree >= isize_diff)
		goto shift;

	/*
	 * Enough free space isn't available in the inode, check if
	 * EA block can hold new_extra_isize bytes.
	 */
	if (EXT4_I(inode)->i_file_acl) {
		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		if (ext4_xattr_check_block(inode, bh)) {
			EXT4_ERROR_INODE(inode, "bad block %llu",
					 EXT4_I(inode)->i_file_acl);
			error = -EFSCORRUPTED;
			goto cleanup;
		}
		base = BHDR(bh);
		end = bh->b_data + bh->b_size;
		min_offs = end - base;
		bfree = ext4_xattr_free_space(BFIRST(bh), &min_offs, base,
					      NULL);
		if (bfree + ifree < isize_diff) {
			if (!tried_min_extra_isize && s_min_extra_isize) {
				tried_min_extra_isize++;
				new_extra_isize = s_min_extra_isize;
				brelse(bh);
				goto retry;
			}
			error = -ENOSPC;
			goto cleanup;
		}
	} else {
		bfree = inode->i_sb->s_blocksize;
	}

	error = ext4_xattr_make_inode_space(handle, inode, raw_inode,
					    isize_diff, ifree, bfree,
					    &total_ino);
	if (error) {
		if (error == -ENOSPC && !tried_min_extra_isize &&
		    s_min_extra_isize) {
			tried_min_extra_isize++;
			new_extra_isize = s_min_extra_isize;
			brelse(bh);
			goto retry;
		}
		goto cleanup;
	}
shift:
	/* Adjust the offsets and shift the remaining entries ahead */
	ext4_xattr_shift_entries(IFIRST(header), EXT4_I(inode)->i_extra_isize
			- new_extra_isize, (void *)raw_inode +
			EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
			(void *)header, total_ino);

	EXT4_I(inode)->i_extra_isize = new_extra_isize;

	brelse(bh);
out:
	ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
	up_write(&EXT4_I(inode)->xattr_sem);
	return 0;

cleanup:
	brelse(bh);
	/*
	 * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
	 * size expansion failed.
	 */
	up_write(&EXT4_I(inode)->xattr_sem);
	return error;
}
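/*
 * Illustrative sketch (not part of the original xattr.c): the retry logic
 * above reduces to a simple space check.  "ifree" is the free space inside
 * the inode's in-body xattr area, "bfree" the free space in the external
 * EA block (or a whole block when none is allocated yet).  The helper name
 * below is hypothetical and only models that decision; the real function
 * also handles corruption checks, journalling, and the s_min_extra_isize
 * fallback.
 */
static int example_can_expand(unsigned int isize_diff, unsigned int ifree,
			      unsigned int bfree)
{
	if (ifree >= isize_diff)
		return 1;	/* enough slack in the inode: just shift entries */
	if (ifree + bfree >= isize_diff)
		return 1;	/* move some entries into the EA block first */
	return 0;		/* caller retries once with s_min_extra_isize, then -ENOSPC */
}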
/*
 * ext4_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed. We have exclusive
 * access to the inode.
 */
void
ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
{
	struct buffer_head *bh = NULL;

	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh) {
		EXT4_ERROR_INODE(inode, "block %llu read error",
				 EXT4_I(inode)->i_file_acl);
		goto cleanup;
	}
	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1)) {
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		goto cleanup;
	}
	ext4_xattr_release_block(handle, inode, bh);
	EXT4_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
}
/*
 * ext4_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Errors are not returned to the caller; a failed or duplicate
 * insertion only means the block will not be found via the cache.
 */
static void
ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
{
	struct ext4_xattr_header *header = BHDR(bh);
	__u32 hash = le32_to_cpu(header->h_hash);
	int reusable = le32_to_cpu(header->h_refcount) <
		       EXT4_XATTR_REFCOUNT_MAX;
	int error;

	error = mb_cache_entry_create(ext4_mb_cache, GFP_NOFS, hash,
				      bh->b_blocknr, reusable);
	if (error) {
		if (error == -EBUSY)
			ea_bdebug(bh, "already in cache");
	} else
		ea_bdebug(bh, "inserting [%x]", (int)hash);
}
/*
 * ext4_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext4_xattr_cmp(struct ext4_xattr_header *header1,
	       struct ext4_xattr_header *header2)
{
	struct ext4_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EFSCORRUPTED;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT4_XATTR_NEXT(entry1);
		entry2 = EXT4_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}
/*
 * ext4_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a pointer to the block found, or NULL if such a block was
 * not found or an error occurred.
 */
static struct buffer_head *
ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
		      struct mb_cache_entry **pce)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;
	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
	ce = mb_cache_entry_find_first(ext4_mb_cache, hash);
	while (ce) {
		struct buffer_head *bh;

		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			EXT4_ERROR_INODE(inode, "block %lu read error",
					 (unsigned long) ce->e_block);
		} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
			*pce = ce;
			return bh;
		}
		brelse(bh);
		ce = mb_cache_entry_find_next(ext4_mb_cache, ce);
	}
	return NULL;
}
#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext4_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header,
					 struct ext4_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
			 EXT4_XATTR_ROUND) >> EXT4_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT
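/*
 * Illustrative sketch (not part of the original xattr.c): the per-entry
 * hash above is a rotate-and-xor over the attribute name bytes, followed
 * by the same scheme with a wider (16-bit) shift over the padded value
 * words.  The hypothetical helper below reproduces only the name part
 * for a plain byte string, so it can be checked by hand.
 */
static __u32 example_name_hash(const char *name, int len)
{
	__u32 hash = 0;
	int n;

	for (n = 0; n < len; n++)
		hash = (hash << 5) ^		/* rotate left by NAME_HASH_SHIFT... */
		       (hash >> (8 * sizeof(hash) - 5)) ^
		       *name++;			/* ...then mix in the next name byte */
	return hash;
}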
#define BLOCK_HASH_SHIFT 16

/*
 * ext4_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext4_xattr_rehash(struct ext4_xattr_header *header,
			      struct ext4_xattr_entry *entry)
{
	struct ext4_xattr_entry *here;
	__u32 hash = 0;

	ext4_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT4_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT
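/*
 * Illustrative sketch (not part of the original xattr.c): the block hash
 * is the individual entry hashes folded together with a 16-bit rotate-xor;
 * any entry whose hash is zero forces the block hash to zero, marking the
 * block as never shareable via the mbcache.  The helper and its array of
 * host-order entry hashes are hypothetical.
 */
static __u32 example_block_hash(const __u32 *entry_hashes, int count)
{
	__u32 hash = 0;
	int n;

	for (n = 0; n < count; n++) {
		if (entry_hashes[n] == 0)
			return 0;	/* unhashed entry: block is not shared */
		hash = (hash << 16) ^
		       (hash >> (8 * sizeof(hash) - 16)) ^
		       entry_hashes[n];
	}
	return hash;
}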
#define	HASH_BUCKET_BITS	10

struct mb_cache *
ext4_xattr_create_cache(void)
{
	return mb_cache_create(HASH_BUCKET_BITS);
}

void ext4_xattr_destroy_cache(struct mb_cache *cache)
{
	if (cache)
		mb_cache_destroy(cache);
}