@@ -92,26 +92,28 @@ xfs_qm_adjust_dqlimits(
 {
 	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	struct xfs_disk_dquot	*d = &dq->q_core;
+	struct xfs_def_quota	*defq;
 	int			prealloc = 0;
 
 	ASSERT(d->d_id);
+	defq = xfs_get_defquota(dq, q);
 
-	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
-		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
+	if (defq->bsoftlimit && !d->d_blk_softlimit) {
+		d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
 		prealloc = 1;
 	}
-	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
-		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
+	if (defq->bhardlimit && !d->d_blk_hardlimit) {
+		d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
 		prealloc = 1;
 	}
-	if (q->qi_isoftlimit && !d->d_ino_softlimit)
-		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
-	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
-		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
-	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
-		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
-	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
-		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
+	if (defq->isoftlimit && !d->d_ino_softlimit)
+		d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
+	if (defq->ihardlimit && !d->d_ino_hardlimit)
+		d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
+	if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
+		d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
+	if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
+		d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);
 
 	if (prealloc)
 		xfs_dquot_set_prealloc_limits(dq);
@@ -232,7 +234,8 @@ xfs_qm_init_dquot_blk(
 {
 	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	xfs_dqblk_t	*d;
-	int		curid, i;
+	xfs_dqid_t	curid;
+	int		i;
 
 	ASSERT(tp);
 	ASSERT(xfs_buf_islocked(bp));
@@ -243,7 +246,6 @@ xfs_qm_init_dquot_blk(
 	 * ID of the first dquot in the block - id's are zero based.
 	 */
 	curid = id - (id % q->qi_dqperchunk);
-	ASSERT(curid >= 0);
 	memset(d, 0, BBTOB(q->qi_dqchunklen));
 	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
 		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
@@ -464,12 +466,13 @@ xfs_qm_dqtobp(
 	struct xfs_bmbt_irec	map;
 	int			nmaps = 1, error;
 	struct xfs_buf		*bp;
-	struct xfs_inode	*quotip = xfs_dq_to_quota_inode(dqp);
+	struct xfs_inode	*quotip;
 	struct xfs_mount	*mp = dqp->q_mount;
 	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
 	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
 	uint			lock_mode;
 
+	quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
 	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
 
 	lock_mode = xfs_ilock_data_map_shared(quotip);
@@ -684,6 +687,56 @@ error0:
 	return error;
 }
 
+/*
+ * Advance to the next id in the current chunk, or if at the
+ * end of the chunk, skip ahead to first id in next allocated chunk
+ * using the SEEK_DATA interface.
+ */
+int
+xfs_dq_get_next_id(
+	xfs_mount_t		*mp,
+	uint			type,
+	xfs_dqid_t		*id,
+	loff_t			eof)
+{
+	struct xfs_inode	*quotip;
+	xfs_fsblock_t		start;
+	loff_t			offset;
+	uint			lock;
+	xfs_dqid_t		next_id;
+	int			error = 0;
+
+	/* Simple advance */
+	next_id = *id + 1;
+
+	/* If new ID is within the current chunk, advancing it sufficed */
+	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
+		*id = next_id;
+		return 0;
+	}
+
+	/* Nope, next_id is now past the current chunk, so find the next one */
+	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
+
+	quotip = xfs_quota_inode(mp, type);
+	lock = xfs_ilock_data_map_shared(quotip);
+
+	offset = __xfs_seek_hole_data(VFS_I(quotip), XFS_FSB_TO_B(mp, start),
+			eof, SEEK_DATA);
+	if (offset < 0)
+		error = offset;
+
+	xfs_iunlock(quotip, lock);
+
+	/* -ENXIO is essentially "no more data" */
+	if (error)
+		return (error == -ENXIO ? -ENOENT: error);
+
+	/* Convert next data offset back to a quota id */
+	*id = XFS_B_TO_FSB(mp, offset) * mp->m_quotainfo->qi_dqperchunk;
+	return 0;
+}
+
 /*
  * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
  * a locked dquot, doing an allocation (if requested) as needed.
@@ -704,6 +757,7 @@ xfs_qm_dqget(
 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
 	struct xfs_dquot	*dqp;
+	loff_t			eof = 0;
 	int			error;
 
 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
@@ -731,6 +785,21 @@
 	}
 #endif
 
+	/* Get the end of the quota file if we need it */
+	if (flags & XFS_QMOPT_DQNEXT) {
+		struct xfs_inode	*quotip;
+		xfs_fileoff_t		last;
+		uint			lock_mode;
+
+		quotip = xfs_quota_inode(mp, type);
+		lock_mode = xfs_ilock_data_map_shared(quotip);
+		error = xfs_bmap_last_offset(quotip, &last, XFS_DATA_FORK);
+		xfs_iunlock(quotip, lock_mode);
+		if (error)
+			return error;
+		eof = XFS_FSB_TO_B(mp, last);
+	}
+
 restart:
 	mutex_lock(&qi->qi_tree_lock);
 	dqp = radix_tree_lookup(tree, id);
@@ -744,6 +813,18 @@ restart:
 		goto restart;
 	}
 
+	/* uninit / unused quota found in radix tree, keep looking */
+	if (flags & XFS_QMOPT_DQNEXT) {
+		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
+			xfs_dqunlock(dqp);
+			mutex_unlock(&qi->qi_tree_lock);
+			error = xfs_dq_get_next_id(mp, type, &id, eof);
+			if (error)
+				return error;
+			goto restart;
+		}
+	}
+
 	dqp->q_nrefs++;
 	mutex_unlock(&qi->qi_tree_lock);
 
@@ -770,6 +851,13 @@ restart:
 	if (ip)
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
 
+	/* If we are asked to find next active id, keep looking */
+	if (error == -ENOENT && (flags & XFS_QMOPT_DQNEXT)) {
+		error = xfs_dq_get_next_id(mp, type, &id, eof);
+		if (!error)
+			goto restart;
+	}
+
 	if (error)
 		return error;
 
@@ -820,6 +908,17 @@ restart:
 	qi->qi_dquots++;
 	mutex_unlock(&qi->qi_tree_lock);
 
+	/* If we are asked to find next active id, keep looking */
+	if (flags & XFS_QMOPT_DQNEXT) {
+		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
+			xfs_qm_dqput(dqp);
+			error = xfs_dq_get_next_id(mp, type, &id, eof);
+			if (error)
+				return error;
+			goto restart;
+		}
+	}
+
 dqret:
 	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
 	trace_xfs_dqget_miss(dqp);