@@ -41,9 +41,7 @@
#include "swab.h"
#include "util.h"

-static u64 ufs_frag_map(struct inode *inode, sector_t frag, bool needs_lock);
-
-static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4])
+static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
@@ -75,227 +73,232 @@ static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t off
	return n;
}

+typedef struct {
+	void *p;
+	union {
+		__fs32 key32;
+		__fs64 key64;
+	};
+	struct buffer_head *bh;
+} Indirect;
+
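+/*
+ * Each Indirect caches the location of one block pointer (->p) and the
+ * value we last saw there (->key32/->key64).  grow_chain32/64() append
+ * one level to the chain and, validated against ufsi->meta_lock,
+ * re-check every cached key against the pointer it was read from; a
+ * mismatch means a truncate raced with the lookup and the caller must
+ * restart.
+ */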
+static inline int grow_chain32(struct ufs_inode_info *ufsi,
+			       struct buffer_head *bh, __fs32 *v,
+			       Indirect *from, Indirect *to)
+{
+	Indirect *p;
+	unsigned seq;
+	to->bh = bh;
+	do {
+		seq = read_seqbegin(&ufsi->meta_lock);
+		to->key32 = *(__fs32 *)(to->p = v);
+		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
+			;
+	} while (read_seqretry(&ufsi->meta_lock, seq));
+	return (p > to);
+}
+
+static inline int grow_chain64(struct ufs_inode_info *ufsi,
+			       struct buffer_head *bh, __fs64 *v,
+			       Indirect *from, Indirect *to)
+{
+	Indirect *p;
+	unsigned seq;
+	to->bh = bh;
+	do {
+		seq = read_seqbegin(&ufsi->meta_lock);
+		to->key64 = *(__fs64 *)(to->p = v);
+		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
+			;
+	} while (read_seqretry(&ufsi->meta_lock, seq));
+	return (p > to);
+}
+
/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

-static u64 ufs_frag_map(struct inode *inode, sector_t frag, bool needs_lock)
+static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
	int shift = uspi->s_apbshift-uspi->s_fpbshift;
-	sector_t offsets[4], *p;
-	int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
-	u64 ret = 0L;
-	__fs32 block;
-	__fs64 u2_block = 0L;
+	Indirect chain[4], *q = chain;
+	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
-	u64 temp = 0L;
+	u64 res = 0;

-	UFSD(": frag = %llu depth = %d\n", (unsigned long long)frag, depth);
	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
	     uspi->s_fpbshift, uspi->s_apbmask,
	     (unsigned long long)mask);

	if (depth == 0)
-		return 0;
+		goto no_block;

+again:
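+	/* we come back here if a racing truncate invalidated the chain */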
	p = offsets;

-	if (needs_lock)
-		lock_ufs(sb);
	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

-	block = ufsi->i_u1.i_data[*p++];
-	if (!block)
-		goto out;
+	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
+		goto changed;
+	if (!q->key32)
+		goto no_block;
	while (--depth) {
+		__fs32 *ptr;
		struct buffer_head *bh;
-		sector_t n = *p++;
+		unsigned n = *p++;

-		bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift));
+		bh = sb_bread(sb, uspi->s_sbbase +
+				fs32_to_cpu(sb, q->key32) + (n>>shift));
		if (!bh)
-			goto out;
-		block = ((__fs32 *) bh->b_data)[n & mask];
-		brelse (bh);
-		if (!block)
-			goto out;
-	}
-	ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask));
-	goto out;
-ufs2:
-	u2_block = ufsi->i_u1.u2_i_data[*p++];
-	if (!u2_block)
-		goto out;
+			goto no_block;
+		ptr = (__fs32 *)bh->b_data + (n & mask);
+		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
+			goto changed;
+		if (!q->key32)
+			goto no_block;
+	}
+	res = fs32_to_cpu(sb, q->key32);
+	goto found;

+ufs2:
+	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
+		goto changed;
+	if (!q->key64)
+		goto no_block;

	while (--depth) {
+		__fs64 *ptr;
		struct buffer_head *bh;
-		sector_t n = *p++;
-
+		unsigned n = *p++;

-		temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block);
-		bh = sb_bread(sb, temp +(u64) (n>>shift));
+		bh = sb_bread(sb, uspi->s_sbbase +
+				fs64_to_cpu(sb, q->key64) + (n>>shift));
		if (!bh)
-			goto out;
-		u2_block = ((__fs64 *)bh->b_data)[n & mask];
-		brelse(bh);
-		if (!u2_block)
-			goto out;
+			goto no_block;
+		ptr = (__fs64 *)bh->b_data + (n & mask);
+		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
+			goto changed;
+		if (!q->key64)
+			goto no_block;
	}
-	temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block);
-	ret = temp + (u64) (frag & uspi->s_fpbmask);
+	res = fs64_to_cpu(sb, q->key64);
+found:
+	res += uspi->s_sbbase;
+no_block:
+	while (q > chain) {
+		brelse(q->bh);
+		q--;
+	}
+	return res;

-out:
-	if (needs_lock)
-		unlock_ufs(sb);
-	return ret;
+changed:
+	while (q > chain) {
+		brelse(q->bh);
+		q--;
+	}
+	goto again;
+}
+
+/*
+ * Unpacking tails: we have a file with a partial final block and we
+ * have been asked to extend it.  If the fragment being written is
+ * within the same block, we need to extend the tail just to cover
+ * that fragment.  Otherwise the tail is extended to a full block.
+ *
+ * Note that we might need to create a _new_ tail, but that will
+ * be handled elsewhere; this is strictly for resizing old
+ * ones.
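+ *
+ * For example, with eight fragments per block, a three-fragment tail
+ * and a write landing in fragment 5 of the same block grow the tail to
+ * six fragments ((writes_to & s_fpbmask) + 1); a write at the block's
+ * last fragment or beyond unpacks it to a full uspi->s_fpb.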
+ */
+static bool
+ufs_extend_tail(struct inode *inode, u64 writes_to,
+		  int *err, struct page *locked_page)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block *sb = inode->i_sb;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
+	unsigned block = ufs_fragstoblks(lastfrag);
+	unsigned new_size;
+	void *p;
+	u64 tmp;
+
+	if (writes_to < (lastfrag | uspi->s_fpbmask))
+		new_size = (writes_to & uspi->s_fpbmask) + 1;
+	else
+		new_size = uspi->s_fpb;
+
+	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
+	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
+				new_size, err, locked_page);
+	return tmp != 0;
}

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
- * @fragment: number of `fragment' which hold pointer
- *	to new allocated fragment(s)
+ * @index: number of block pointer within the inode's array.
 * @new_fragment: number of new allocated fragment(s)
- * @required: how many fragment(s) we require
 * @err: we set it if something wrong
- * @phys: pointer to where we save physical number of new allocated fragments,
- *	NULL if we allocate not data(indirect blocks for example).
 * @new: we set it if we allocate new block
 * @locked_page: for ufs_new_fragments()
 */
-static struct buffer_head *
-ufs_inode_getfrag(struct inode *inode, u64 fragment,
-		  sector_t new_fragment, unsigned int required, int *err,
-		  long *phys, int *new, struct page *locked_page)
+static u64
+ufs_inode_getfrag(struct inode *inode, unsigned index,
+		  sector_t new_fragment, int *err,
+		  int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
-	struct buffer_head * result;
-	unsigned blockoff, lastblockoff;
-	u64 tmp, goal, lastfrag, block, lastblock;
-	void *p, *p2;
-
-	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
-	     "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
-	     (unsigned long long)new_fragment, required, !phys);
+	u64 tmp, goal, lastfrag;
+	unsigned nfrags = uspi->s_fpb;
+	void *p;

	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;
	 */

-	block = ufs_fragstoblks (fragment);
-	blockoff = ufs_fragnum (fragment);
-	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
-
-	goal = 0;
-
-repeat:
+	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
	tmp = ufs_data_ptr_to_cpu(sb, p);
+	if (tmp)
+		goto out;

	lastfrag = ufsi->i_lastfrag;
-	if (tmp && fragment < lastfrag) {
-		if (!phys) {
-			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
-			if (tmp == ufs_data_ptr_to_cpu(sb, p)) {
-				UFSD("EXIT, result %llu\n",
-				     (unsigned long long)tmp + blockoff);
-				return result;
-			}
-			brelse (result);
-			goto repeat;
-		} else {
-			*phys = uspi->s_sbbase + tmp + blockoff;
-			return NULL;
-		}
-	}

-	lastblock = ufs_fragstoblks (lastfrag);
-	lastblockoff = ufs_fragnum (lastfrag);
-	/*
-	 * We will extend file into new block beyond last allocated block
-	 */
-	if (lastblock < block) {
-		/*
-		 * We must reallocate last allocated block
-		 */
-		if (lastblockoff) {
-			p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
-			tmp = ufs_new_fragments(inode, p2, lastfrag,
-						ufs_data_ptr_to_cpu(sb, p2),
-						uspi->s_fpb - lastblockoff,
-						err, locked_page);
-			if (!tmp) {
-				if (lastfrag != ufsi->i_lastfrag)
-					goto repeat;
-				else
-					return NULL;
-			}
-			lastfrag = ufsi->i_lastfrag;
-
-		}
-		tmp = ufs_data_ptr_to_cpu(sb,
-					 ufs_get_direct_data_ptr(uspi, ufsi,
-								 lastblock));
-		if (tmp)
-			goal = tmp + uspi->s_fpb;
-		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
-					 goal, required + blockoff,
-					 err,
-					 phys != NULL ? locked_page : NULL);
-	} else if (lastblock == block) {
-		/*
-		 * We will extend last allocated block
-		 */
-		tmp = ufs_new_fragments(inode, p, fragment -
-					(blockoff - lastblockoff),
-					ufs_data_ptr_to_cpu(sb, p),
-					required + (blockoff - lastblockoff),
-					err, phys != NULL ? locked_page : NULL);
-	} else /* (lastblock > block) */ {
-		/*
-		 * We will allocate new block before last allocated block
-		 */
-		if (block) {
-			tmp = ufs_data_ptr_to_cpu(sb,
-						 ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
-			if (tmp)
-				goal = tmp + uspi->s_fpb;
-		}
-		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
-					goal, uspi->s_fpb, err,
-					phys != NULL ? locked_page : NULL);
+	/* will that be a new tail? */
+	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
+		nfrags = (new_fragment & uspi->s_fpbmask) + 1;
+
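+	/* try to allocate right behind the block at index - 1, if any */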
+	goal = 0;
+	if (index) {
+		goal = ufs_data_ptr_to_cpu(sb,
+				 ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
+		if (goal)
+			goal += uspi->s_fpb;
	}
+	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
+				goal, uspi->s_fpb, err, locked_page);
+
	if (!tmp) {
-		if ((!blockoff && ufs_data_ptr_to_cpu(sb, p)) ||
-		    (blockoff && lastfrag != ufsi->i_lastfrag))
-			goto repeat;
		*err = -ENOSPC;
-		return NULL;
+		return 0;
	}

-	if (!phys) {
-		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
-	} else {
-		*phys = uspi->s_sbbase + tmp + blockoff;
-		result = NULL;
-		*err = 0;
+	if (new)
		*new = 1;
-	}
-
	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
-	UFSD("EXIT, result %llu\n", (unsigned long long)tmp + blockoff);
-	return result;
+out:
+	return tmp + uspi->s_sbbase;

	/* This part : To be implemented ....
	Required only for writing, not required for READ-ONLY.
@@ -316,95 +319,70 @@ repeat2:
/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
- * @bh: pointer to block which hold "pointer" to new allocated block
- * @fragment: number of `fragment' which hold pointer
- *	to new allocated block
+ * @ind_block: block number of the indirect block
+ * @index: number of pointer within the indirect block
 * @new_fragment: number of new allocated fragment
 *	(block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
- * @phys: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
-static struct buffer_head *
-ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
-		  u64 fragment, sector_t new_fragment, int *err,
-		  long *phys, int *new, struct page *locked_page)
+static u64
+ufs_inode_getblock(struct inode *inode, u64 ind_block,
+		  unsigned index, sector_t new_fragment, int *err,
+		  int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
-	struct buffer_head * result;
-	unsigned blockoff;
-	u64 tmp, goal, block;
+	int shift = uspi->s_apbshift - uspi->s_fpbshift;
+	u64 tmp = 0, goal;
+	struct buffer_head *bh;
	void *p;

-	block = ufs_fragstoblks (fragment);
-	blockoff = ufs_fragnum (fragment);
-
-	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n",
-	     inode->i_ino, (unsigned long long)fragment,
-	     (unsigned long long)new_fragment, !phys);
+	if (!ind_block)
+		return 0;

-	result = NULL;
-	if (!bh)
-		goto out;
-	if (!buffer_uptodate(bh)) {
-		ll_rw_block (READ, 1, &bh);
-		wait_on_buffer (bh);
-		if (!buffer_uptodate(bh))
-			goto out;
+	bh = sb_bread(sb, ind_block + (index >> shift));
+	if (unlikely(!bh)) {
+		*err = -EIO;
+		return 0;
	}
+
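+	/* reduce index to the pointer's offset inside the buffer just read */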
+	index &= uspi->s_apbmask >> uspi->s_fpbshift;
	if (uspi->fs_magic == UFS2_MAGIC)
-		p = (__fs64 *)bh->b_data + block;
+		p = (__fs64 *)bh->b_data + index;
	else
-		p = (__fs32 *)bh->b_data + block;
-repeat:
+		p = (__fs32 *)bh->b_data + index;
+
	tmp = ufs_data_ptr_to_cpu(sb, p);
-	if (tmp) {
-		if (!phys) {
-			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
-			if (tmp == ufs_data_ptr_to_cpu(sb, p))
-				goto out;
-			brelse (result);
-			goto repeat;
-		} else {
-			*phys = uspi->s_sbbase + tmp + blockoff;
-			goto out;
-		}
-	}
+	if (tmp)
+		goto out;

-	if (block && (uspi->fs_magic == UFS2_MAGIC ?
-		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) :
-		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1]))))
+	if (index && (uspi->fs_magic == UFS2_MAGIC ?
+		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
+		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
-	if (!tmp) {
-		if (ufs_data_ptr_to_cpu(sb, p))
-			goto repeat;
+	if (!tmp)
		goto out;
-	}

-
-	if (!phys) {
-		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
-	} else {
-		*phys = uspi->s_sbbase + tmp + blockoff;
+	if (new)
		*new = 1;
-	}

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
-	UFSD("result %llu\n", (unsigned long long)tmp + blockoff);
out:
	brelse (bh);
	UFSD("EXIT\n");
-	return result;
+	if (tmp)
+		tmp += uspi->s_sbbase;
+	return tmp;
}

/**
@@ -412,103 +390,64 @@ out:
 * readpage, writepage and so on
 */

-int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
+static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
-	struct super_block * sb = inode->i_sb;
-	struct ufs_sb_info * sbi = UFS_SB(sb);
-	struct ufs_sb_private_info * uspi = sbi->s_uspi;
-	struct buffer_head * bh;
-	int ret, err, new;
-	unsigned long ptr,phys;
+	struct super_block *sb = inode->i_sb;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	int err = 0, new = 0;
+	unsigned offsets[4];
+	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	u64 phys64 = 0;
-	bool needs_lock = (sbi->mutex_owner != current);
-
+	unsigned frag = fragment & uspi->s_fpbmask;
+
	if (!create) {
-		phys64 = ufs_frag_map(inode, fragment, needs_lock);
-		UFSD("phys64 = %llu\n", (unsigned long long)phys64);
-		if (phys64)
-			map_bh(bh_result, sb, phys64);
-		return 0;
+		phys64 = ufs_frag_map(inode, offsets, depth);
+		goto out;
	}

	/* This code entered only while writing ....? */

-	err = -EIO;
-	new = 0;
-	ret = 0;
-	bh = NULL;
-
-	if (needs_lock)
-		lock_ufs(sb);
+	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
-	if (fragment >
-	    ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
-	     << uspi->s_fpbshift))
-		goto abort_too_big;
-
-	err = 0;
-	ptr = fragment;
-
-	/*
-	 * ok, these macros clean the logic up a bit and make
-	 * it much more readable:
-	 */
-#define GET_INODE_DATABLOCK(x) \
-	ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\
-			  bh_result->b_page)
-#define GET_INODE_PTR(x) \
-	ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\
-			  bh_result->b_page)
-#define GET_INDIRECT_DATABLOCK(x) \
-	ufs_inode_getblock(inode, bh, x, fragment,	\
-			  &err, &phys, &new, bh_result->b_page)
-#define GET_INDIRECT_PTR(x) \
-	ufs_inode_getblock(inode, bh, x, fragment,	\
-			  &err, NULL, NULL, NULL)
-
-	if (ptr < UFS_NDIR_FRAGMENT) {
-		bh = GET_INODE_DATABLOCK(ptr);
+	if (unlikely(!depth)) {
+		ufs_warning(sb, "ufs_get_block", "block > big");
+		err = -EIO;
		goto out;
	}
-	ptr -= UFS_NDIR_FRAGMENT;
-	if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
-		bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
-		goto get_indirect;
-	}
-	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
-	if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
-		bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
-		goto get_double;
-	}
-	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
-	bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
-	bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
-get_double:
-	bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
-get_indirect:
-	bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);
-
-#undef GET_INODE_DATABLOCK
-#undef GET_INODE_PTR
-#undef GET_INDIRECT_DATABLOCK
-#undef GET_INDIRECT_PTR

-out:
-	if (err)
-		goto abort;
-	if (new)
-		set_buffer_new(bh_result);
-	map_bh(bh_result, sb, phys);
-abort:
-	if (needs_lock)
-		unlock_ufs(sb);
+	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
+		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
+		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
+		if (tailfrags && fragment >= lastfrag) {
+			if (!ufs_extend_tail(inode, fragment,
+					     &err, bh_result->b_page))
+				goto out;
+		}
+ }
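
+	/*
+	 * Walk offsets[]: the first index selects a slot in the inode
+	 * itself; each middle level allocates an indirect block if it is
+	 * missing, and the last level maps (and may allocate) the data
+	 * block that gets reported back in bh_result.
+	 */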
+	if (depth == 1) {
+		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
+					   &err, &new, bh_result->b_page);
+	} else {
+		int i;
+		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
+					   &err, NULL, NULL);
+		for (i = 1; i < depth - 1; i++)
+			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
+						fragment, &err, NULL, NULL);
+		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
+					fragment, &err, &new, bh_result->b_page);
+	}
+out:
+	if (phys64) {
+		phys64 += frag;
+		map_bh(bh_result, sb, phys64);
+		if (new)
+			set_buffer_new(bh_result);
+	}
+	mutex_unlock(&UFS_I(inode)->truncate_mutex);
	return err;
-
-abort_too_big:
-	ufs_warning(sb, "ufs_get_block", "block > big");
-	goto abort;
}

static int ufs_writepage(struct page *page, struct writeback_control *wbc)
@@ -526,12 +465,16 @@ int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

+static void ufs_truncate_blocks(struct inode *);
+
static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

-	if (to > inode->i_size)
+	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
+		ufs_truncate_blocks(inode);
+	}
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
@@ -548,6 +491,18 @@ static int ufs_write_begin(struct file *file, struct address_space *mapping,
	return ret;
}
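
+/*
+ * generic_write_end() returns the number of bytes actually copied; on a
+ * short copy, blocks may have been instantiated past the data that made
+ * it in, so trim everything beyond the (unchanged) i_size.
+ */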
+static int ufs_write_end(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned copied,
+			struct page *page, void *fsdata)
+{
+	int ret;
+
+	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+	if (ret < len)
+		ufs_write_failed(mapping, pos + len);
+	return ret;
+}
+
static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ufs_getfrag_block);
@@ -557,7 +512,7 @@ const struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
-	.write_end = generic_write_end,
+	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};
@@ -599,7 +554,7 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}
-
+
	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
@@ -619,7 +574,7 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

-
+
	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
@@ -753,7 +708,7 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));
-
+
	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
@@ -855,23 +810,19 @@ static int ufs_update_inode(struct inode * inode, int do_sync)

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}
-
+
	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);
-
+
	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
-	int ret;
-	lock_ufs(inode->i_sb);
-	ret = ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
-	unlock_ufs(inode->i_sb);
-	return ret;
+	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode (struct inode *inode)
@@ -888,24 +839,389 @@ void ufs_evict_inode(struct inode * inode)

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
-		loff_t old_i_size;
-		/*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
-		lock_ufs(inode->i_sb);
-		mark_inode_dirty(inode);
-		ufs_update_inode(inode, IS_SYNC(inode));
-		old_i_size = inode->i_size;
		inode->i_size = 0;
-		if (inode->i_blocks && ufs_truncate(inode, old_i_size))
-			ufs_warning(inode->i_sb, __func__, "ufs_truncate failed\n");
-		unlock_ufs(inode->i_sb);
+		if (inode->i_blocks)
+			ufs_truncate_blocks(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

-	if (want_delete) {
-		lock_ufs(inode->i_sb);
+	if (want_delete)
		ufs_free_inode(inode);
-		unlock_ufs(inode->i_sb);
+}
+
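+/*
+ * Batch up runs of adjacent freed blocks so a whole contiguous extent
+ * goes back to the allocator in one ufs_free_blocks() call;
+ * free_data(ctx, 0, 0) flushes whatever run is still pending.
+ */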
+struct to_free {
+	struct inode *inode;
+	u64 to;
+	unsigned count;
+};
+
+static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
+{
+	if (ctx->count && ctx->to != from) {
+		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
+		ctx->count = 0;
+	}
+	ctx->count += count;
+	ctx->to = from + count;
+}
+
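+/* index of the first block (fragment) lying past the end of file */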
+#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
+#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
+
+static void ufs_trunc_direct(struct inode *inode)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	void *p;
+	u64 frag1, frag2, frag3, frag4, block1, block2;
+	struct to_free ctx = {.inode = inode};
+	unsigned i, tmp;
+
+	UFSD("ENTER: ino %lu\n", inode->i_ino);
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+
+	frag1 = DIRECT_FRAGMENT;
+	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
+	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
+	frag3 = frag4 & ~uspi->s_fpbmask;
+	block1 = block2 = 0;
+	if (frag2 > frag3) {
+		frag2 = frag4;
+		frag3 = frag4 = 0;
+	} else if (frag2 < frag3) {
+		block1 = ufs_fragstoblks (frag2);
+		block2 = ufs_fragstoblks (frag3);
+	}
+
+	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
+	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
+	     (unsigned long long)frag1, (unsigned long long)frag2,
+	     (unsigned long long)block1, (unsigned long long)block2,
+	     (unsigned long long)frag3, (unsigned long long)frag4);
+
+	if (frag1 >= frag2)
+		goto next1;
+
+	/*
+	 * Free first free fragments
+	 */
+	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
+	tmp = ufs_data_ptr_to_cpu(sb, p);
+	if (!tmp )
+		ufs_panic (sb, "ufs_trunc_direct", "internal error");
+	frag2 -= frag1;
+	frag1 = ufs_fragnum (frag1);
+
+	ufs_free_fragments(inode, tmp + frag1, frag2);
+
+next1:
+	/*
+	 * Free whole blocks
+	 */
+	for (i = block1 ; i < block2; i++) {
+		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
+		tmp = ufs_data_ptr_to_cpu(sb, p);
+		if (!tmp)
+			continue;
+		write_seqlock(&ufsi->meta_lock);
+		ufs_data_ptr_clear(uspi, p);
+		write_sequnlock(&ufsi->meta_lock);
+
+		free_data(&ctx, tmp, uspi->s_fpb);
+	}
+
+	free_data(&ctx, 0, 0);
+
+	if (frag3 >= frag4)
+		goto next3;
+
+	/*
+	 * Free last free fragments
+	 */
+	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
+	tmp = ufs_data_ptr_to_cpu(sb, p);
+	if (!tmp )
+		ufs_panic(sb, "ufs_truncate_direct", "internal error");
+	frag4 = ufs_fragnum (frag4);
+	write_seqlock(&ufsi->meta_lock);
+	ufs_data_ptr_clear(uspi, p);
+	write_sequnlock(&ufsi->meta_lock);
+
+	ufs_free_fragments (inode, tmp, frag4);
+ next3:
+
+	UFSD("EXIT: ino %lu\n", inode->i_ino);
+}
+
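+/*
+ * Free an entire branch: recurse into the lower indirect levels
+ * (batching the data blocks at the last level), then release the
+ * indirect block itself.  Nothing here is reachable from the inode
+ * anymore, so no meta_lock is needed.
+ */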
+static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
+{
+	struct super_block *sb = inode->i_sb;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
+	unsigned i;
+
+	if (!ubh)
+		return;
+
+	if (--depth) {
+		for (i = 0; i < uspi->s_apb; i++) {
+			void *p = ubh_get_data_ptr(uspi, ubh, i);
+			u64 block = ufs_data_ptr_to_cpu(sb, p);
+			if (block)
+				free_full_branch(inode, block, depth);
+		}
+	} else {
+		struct to_free ctx = {.inode = inode};
+
+		for (i = 0; i < uspi->s_apb; i++) {
+			void *p = ubh_get_data_ptr(uspi, ubh, i);
+			u64 block = ufs_data_ptr_to_cpu(sb, p);
+			if (block)
+				free_data(&ctx, block, uspi->s_fpb);
+		}
+		free_data(&ctx, 0, 0);
+	}
+
+	ubh_bforget(ubh);
+	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
+}
+
+static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
+{
+	struct super_block *sb = inode->i_sb;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	unsigned i;
+
+	if (--depth) {
+		for (i = from; i < uspi->s_apb ; i++) {
+			void *p = ubh_get_data_ptr(uspi, ubh, i);
+			u64 block = ufs_data_ptr_to_cpu(sb, p);
+			if (block) {
+				write_seqlock(&UFS_I(inode)->meta_lock);
+				ufs_data_ptr_clear(uspi, p);
+				write_sequnlock(&UFS_I(inode)->meta_lock);
+				ubh_mark_buffer_dirty(ubh);
+				free_full_branch(inode, block, depth);
+			}
+		}
+	} else {
+		struct to_free ctx = {.inode = inode};
+
+		for (i = from; i < uspi->s_apb; i++) {
+			void *p = ubh_get_data_ptr(uspi, ubh, i);
+			u64 block = ufs_data_ptr_to_cpu(sb, p);
+			if (block) {
+				write_seqlock(&UFS_I(inode)->meta_lock);
+				ufs_data_ptr_clear(uspi, p);
+				write_sequnlock(&UFS_I(inode)->meta_lock);
+				ubh_mark_buffer_dirty(ubh);
+				free_data(&ctx, block, uspi->s_fpb);
+			}
+		}
+		free_data(&ctx, 0, 0);
+	}
+	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
+		ubh_sync_block(ubh);
+	ubh_brelse(ubh);
+}
+
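+/*
+ * On an extending truncate, make sure the block that will carry the
+ * new EOF really is allocated, and zero the remaining fragments of
+ * that block when it sits past the direct area, before i_size gets
+ * updated.
+ */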
+static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
+{
+	int err = 0;
+	struct super_block *sb = inode->i_sb;
+	struct address_space *mapping = inode->i_mapping;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	unsigned i, end;
+	sector_t lastfrag;
+	struct page *lastpage;
+	struct buffer_head *bh;
+	u64 phys64;
+
+	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
+
+	if (!lastfrag)
+		goto out;
+
+	lastfrag--;
+
+	lastpage = ufs_get_locked_page(mapping, lastfrag >>
+				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
+	if (IS_ERR(lastpage)) {
+		err = -EIO;
+		goto out;
+	}
+
+	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
+	bh = page_buffers(lastpage);
+	for (i = 0; i < end; ++i)
+		bh = bh->b_this_page;
+
+
+	err = ufs_getfrag_block(inode, lastfrag, bh, 1);
+
+	if (unlikely(err))
+		goto out_unlock;
+
+	if (buffer_new(bh)) {
+		clear_buffer_new(bh);
+		unmap_underlying_metadata(bh->b_bdev,
+					  bh->b_blocknr);
+		/*
+		 * we do not zero the fragment, because if it was mapped
+		 * to a hole it already contains zeroes
+		 */
+		set_buffer_uptodate(bh);
+		mark_buffer_dirty(bh);
+		set_page_dirty(lastpage);
+	}
+
+	if (lastfrag >= UFS_IND_FRAGMENT) {
+		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
+		phys64 = bh->b_blocknr + 1;
+		for (i = 0; i < end; ++i) {
+			bh = sb_getblk(sb, i + phys64);
+			lock_buffer(bh);
+			memset(bh->b_data, 0, sb->s_blocksize);
+			set_buffer_uptodate(bh);
+			mark_buffer_dirty(bh);
+			unlock_buffer(bh);
+			sync_dirty_buffer(bh);
+			brelse(bh);
+		}
+	}
+out_unlock:
+	ufs_put_locked_page(lastpage);
+out:
+	return err;
+}
+
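+/*
+ * The core of truncate: the branch containing the new EOF is trimmed
+ * in place via free_branch_tail() on each partially-kept indirect
+ * block, and every branch wholly beyond EOF is detached from the inode
+ * under meta_lock and handed to free_full_branch().
+ */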
+static void __ufs_truncate_blocks(struct inode *inode)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block *sb = inode->i_sb;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	unsigned offsets[4];
+	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
+	int depth2;
+	unsigned i;
+	struct ufs_buffer_head *ubh[3];
+	void *p;
+	u64 block;
+
+	if (!depth)
+		return;
+
+	/* find the last non-zero in offsets[] */
+	for (depth2 = depth - 1; depth2; depth2--)
+		if (offsets[depth2])
+			break;
+
+	mutex_lock(&ufsi->truncate_mutex);
+	if (depth == 1) {
+		ufs_trunc_direct(inode);
+		offsets[0] = UFS_IND_BLOCK;
+	} else {
+		/* get the blocks that should be partially emptied */
+		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
+		for (i = 0; i < depth2; i++) {
+			offsets[i]++;	/* next branch is fully freed */
+			block = ufs_data_ptr_to_cpu(sb, p);
+			if (!block)
+				break;
+			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
+			if (!ubh[i]) {
+				write_seqlock(&ufsi->meta_lock);
+				ufs_data_ptr_clear(uspi, p);
+				write_sequnlock(&ufsi->meta_lock);
+				break;
+			}
+			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
+		}
+		while (i--)
+			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
+	}
+	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
+		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
+		block = ufs_data_ptr_to_cpu(sb, p);
+		if (block) {
+			write_seqlock(&ufsi->meta_lock);
+			ufs_data_ptr_clear(uspi, p);
+			write_sequnlock(&ufsi->meta_lock);
+			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
+		}
	}
+	ufsi->i_lastfrag = DIRECT_FRAGMENT;
+	mark_inode_dirty(inode);
+	mutex_unlock(&ufsi->truncate_mutex);
+}
+
+static int ufs_truncate(struct inode *inode, loff_t size)
+{
+	int err = 0;
+
+	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
+	     inode->i_ino, (unsigned long long)size,
+	     (unsigned long long)i_size_read(inode));
+
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+	      S_ISLNK(inode->i_mode)))
+		return -EINVAL;
+	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+		return -EPERM;
+
+	err = ufs_alloc_lastblock(inode, size);
+
+	if (err)
+		goto out;
+
+	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);
+
+	truncate_setsize(inode, size);
+
+	__ufs_truncate_blocks(inode);
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+out:
+	UFSD("EXIT: err %d\n", err);
+	return err;
+}
+
+void ufs_truncate_blocks(struct inode *inode)
+{
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+	      S_ISLNK(inode->i_mode)))
+		return;
+	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+		return;
+	__ufs_truncate_blocks(inode);
+}
+
+int ufs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = d_inode(dentry);
+	unsigned int ia_valid = attr->ia_valid;
+	int error;
+
+	error = inode_change_ok(inode, attr);
+	if (error)
+		return error;
+
+	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
+		error = ufs_truncate(inode, attr->ia_size);
+		if (error)
+			return error;
+	}
+
+	setattr_copy(inode, attr);
+	mark_inode_dirty(inode);
+	return 0;
}
+
+const struct inode_operations ufs_file_inode_operations = {
+	.setattr = ufs_setattr,
+};