@@ -77,8 +77,8 @@ static void ext4_xattr_block_cache_insert(struct mb_cache *,
 static struct buffer_head *
 ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *,
			    struct mb_cache_entry **);
-static void ext4_xattr_hash_entry(struct ext4_xattr_entry *entry,
-				  void *value_base);
+static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
+				    size_t value_count);
 static void ext4_xattr_rehash(struct ext4_xattr_header *);
 
 static const struct xattr_handler * const ext4_xattr_handler_map[] = {
@@ -380,7 +380,9 @@ error:
 }
 
 static int
-ext4_xattr_inode_verify_hash(struct inode *ea_inode, void *buffer, size_t size)
+ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
+			       struct ext4_xattr_entry *entry, void *buffer,
+			       size_t size)
 {
	u32 hash;
 
@@ -388,23 +390,35 @@ ext4_xattr_inode_verify_hash(struct inode *ea_inode, void *buffer, size_t size)
	hash = ext4_xattr_inode_hash(EXT4_SB(ea_inode->i_sb), buffer, size);
	if (hash != ext4_xattr_inode_get_hash(ea_inode))
		return -EFSCORRUPTED;
+
+	if (entry) {
+		__le32 e_hash, tmp_data;
+
+		/* Verify entry hash. */
+		tmp_data = cpu_to_le32(hash);
+		e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len,
+					       &tmp_data, 1);
+		if (e_hash != entry->e_hash)
+			return -EFSCORRUPTED;
+	}
	return 0;
 }
 
 #define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode)->i_mtime.tv_sec)
 
 /*
- * Read the value from the EA inode.
+ * Read xattr value from the EA inode.
  */
 static int
-ext4_xattr_inode_get(struct inode *inode, unsigned long ea_ino, void *buffer,
-		     size_t size)
+ext4_xattr_inode_get(struct inode *inode, struct ext4_xattr_entry *entry,
+		     void *buffer, size_t size)
 {
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
	struct inode *ea_inode;
	int err;
 
-	err = ext4_xattr_inode_iget(inode, ea_ino, &ea_inode);
+	err = ext4_xattr_inode_iget(inode, le32_to_cpu(entry->e_value_inum),
+				    &ea_inode);
	if (err) {
		ea_inode = NULL;
		goto out;
@@ -422,7 +436,7 @@ ext4_xattr_inode_get(struct inode *inode, unsigned long ea_ino, void *buffer,
	if (err)
		goto out;
 
-	err = ext4_xattr_inode_verify_hash(ea_inode, buffer, size);
+	err = ext4_xattr_inode_verify_hashes(ea_inode, entry, buffer, size);
	/*
	 * Compatibility check for old Lustre ea_inode implementation. Old
	 * version does not have hash validation, but it has a backpointer
@@ -489,9 +503,8 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
		if (size > buffer_size)
			goto cleanup;
		if (entry->e_value_inum) {
-			error = ext4_xattr_inode_get(inode,
-					     le32_to_cpu(entry->e_value_inum),
-					     buffer, size);
+			error = ext4_xattr_inode_get(inode, entry, buffer,
+						     size);
			if (error)
				goto cleanup;
		} else {
@@ -539,9 +552,8 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
		if (size > buffer_size)
			goto cleanup;
		if (entry->e_value_inum) {
-			error = ext4_xattr_inode_get(inode,
-					     le32_to_cpu(entry->e_value_inum),
-					     buffer, size);
+			error = ext4_xattr_inode_get(inode, entry, buffer,
+						     size);
			if (error)
				goto cleanup;
		} else {
@@ -1400,8 +1412,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
		    (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
		    i_size_read(ea_inode) == value_len &&
		    !ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
-		    !ext4_xattr_inode_verify_hash(ea_inode, ea_data,
-						  value_len) &&
+		    !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
+						    value_len) &&
		    !memcmp(value, ea_data, value_len)) {
			mb_cache_entry_touch(ea_inode_cache, ce);
			mb_cache_entry_put(ea_inode_cache, ce);
@@ -1665,12 +1677,36 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
		here->e_value_size = cpu_to_le32(i->value_len);
	}
 
-	if (is_block) {
-		if (i->value)
-			ext4_xattr_hash_entry(here, s->base);
-		ext4_xattr_rehash((struct ext4_xattr_header *)s->base);
+	if (i->value) {
+		__le32 hash = 0;
+
+		/* Entry hash calculation. */
+		if (in_inode) {
+			__le32 crc32c_hash;
+
+			/*
+			 * Feed crc32c hash instead of the raw value for entry
+			 * hash calculation. This is to avoid walking
+			 * potentially long value buffer again.
+			 */
+			crc32c_hash = cpu_to_le32(
+				       ext4_xattr_inode_get_hash(new_ea_inode));
+			hash = ext4_xattr_hash_entry(here->e_name,
+						     here->e_name_len,
+						     &crc32c_hash, 1);
+		} else if (is_block) {
+			__le32 *value = s->base + min_offs - new_size;
+
+			hash = ext4_xattr_hash_entry(here->e_name,
+						     here->e_name_len, value,
+						     new_size >> 2);
+		}
+		here->e_hash = hash;
	}
 
+	if (is_block)
+		ext4_xattr_rehash((struct ext4_xattr_header *)s->base);
+
	ret = 0;
 out:
	iput(old_ea_inode);
@@ -2452,9 +2488,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
 
	/* Save the entry name and the entry value */
	if (entry->e_value_inum) {
-		error = ext4_xattr_inode_get(inode,
-					     le32_to_cpu(entry->e_value_inum),
-					     buffer, value_size);
+		error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
		if (error)
			goto out;
	} else {
@@ -2944,30 +2978,22 @@ ext4_xattr_block_cache_find(struct inode *inode,
  *
  * Compute the hash of an extended attribute.
  */
-static void ext4_xattr_hash_entry(struct ext4_xattr_entry *entry,
-				  void *value_base)
+static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
+				    size_t value_count)
 {
	__u32 hash = 0;
-	char *name = entry->e_name;
-	int n;
 
-	for (n = 0; n < entry->e_name_len; n++) {
+	while (name_len--) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}
-
-	if (!entry->e_value_inum && entry->e_value_size) {
-		__le32 *value = (__le32 *)((char *)value_base +
-					   le16_to_cpu(entry->e_value_offs));
-		for (n = (le32_to_cpu(entry->e_value_size) +
-			  EXT4_XATTR_ROUND) >> EXT4_XATTR_PAD_BITS; n; n--) {
-			hash = (hash << VALUE_HASH_SHIFT) ^
-			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
-			       le32_to_cpu(*value++);
-		}
+	while (value_count--) {
+		hash = (hash << VALUE_HASH_SHIFT) ^
+		       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
+		       le32_to_cpu(*value++);
	}
-	entry->e_hash = cpu_to_le32(hash);
+	return cpu_to_le32(hash);
 }
 
 #undef NAME_HASH_SHIFT