author | Darrick J. Wong <darrick.wong@oracle.com> | 2020-07-14 10:37:31 -0700 |
---|---|---|
committer | Darrick J. Wong <darrick.wong@oracle.com> | 2020-07-28 20:24:14 -0700 |
commit | be37d40c1ba0b5484ea2f8c109a9eda13e4c690a (patch) | |
tree | 05c574cf82039f117c0d65a318373497a7a3528f | |
parent | d3537cf93e5e2f8b4e95cfe8bc8fa03b58c88e32 (diff) | |
xfs: stop using q_core counters in the quota code
Add counter fields to the incore dquot, and use them instead of the ones
in q_core. This eliminates a bunch of endian conversions and will
eventually allow us to remove q_core entirely.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Allison Collins <allison.henderson@oracle.com>
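The pattern applied throughout the diff below is straightforward: the incore dquot keeps its counters in native byte order, and the big-endian on-disk values are converted at the disk boundary, in xfs_dquot_from_disk() and xfs_dquot_to_disk(). The following standalone C program is a minimal sketch of that idea, not the kernel code; the names disk_dquot, incore_res, res_from_disk and res_to_disk are invented for illustration.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* On-disk format: fields stored big-endian (a __be64 in the kernel). */
struct disk_dquot {
	uint64_t d_bcount;
	uint64_t d_blk_hardlimit;
};

/* Incore format: plain native-endian counters. */
struct incore_res {
	uint64_t count;
	uint64_t hardlimit;
};

/* One conversion when the dquot is read in from disk ... */
static void res_from_disk(struct incore_res *res, const struct disk_dquot *d)
{
	res->count = be64toh(d->d_bcount);
	res->hardlimit = be64toh(d->d_blk_hardlimit);
}

/* ... and one when it is flushed back out. */
static void res_to_disk(struct disk_dquot *d, const struct incore_res *res)
{
	d->d_bcount = htobe64(res->count);
	d->d_blk_hardlimit = htobe64(res->hardlimit);
}

int main(void)
{
	struct disk_dquot d = {
		.d_bcount = htobe64(150),
		.d_blk_hardlimit = htobe64(100),
	};
	struct incore_res blk;

	res_from_disk(&blk, &d);

	/* Limit checks now compare native integers; no be64_to_cpu() per use. */
	if (blk.hardlimit && blk.count > blk.hardlimit)
		printf("over hard limit by %llu blocks\n",
		       (unsigned long long)(blk.count - blk.hardlimit));

	blk.count += 10;	/* quota accounting becomes plain arithmetic */
	res_to_disk(&d, &blk);
	return 0;
}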
-rw-r--r-- | fs/xfs/scrub/quota.c | 18 |
-rw-r--r-- | fs/xfs/xfs_dquot.c | 47 |
-rw-r--r-- | fs/xfs/xfs_dquot.h | 3 |
-rw-r--r-- | fs/xfs/xfs_qm.c | 6 |
-rw-r--r-- | fs/xfs/xfs_qm.h | 6 |
-rw-r--r-- | fs/xfs/xfs_trace.h | 4 |
-rw-r--r-- | fs/xfs/xfs_trans_dquot.c | 34 |
7 files changed, 56 insertions, 62 deletions
diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c
index e73e6c88e76a..20bc763e88b4 100644
--- a/fs/xfs/scrub/quota.c
+++ b/fs/xfs/scrub/quota.c
@@ -82,9 +82,6 @@ xchk_quota_item(
 	struct xfs_disk_dquot	*d = &dq->q_core;
 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 	xfs_fileoff_t		offset;
-	unsigned long long	bcount;
-	unsigned long long	icount;
-	unsigned long long	rcount;
 	xfs_ino_t		fs_icount;
 	int			error = 0;

@@ -132,9 +129,6 @@ xchk_quota_item(
 		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

 	/* Check the resource counts. */
-	bcount = be64_to_cpu(d->d_bcount);
-	icount = be64_to_cpu(d->d_icount);
-	rcount = be64_to_cpu(d->d_rtbcount);
 	fs_icount = percpu_counter_sum(&mp->m_icount);

 	/*
@@ -143,15 +137,15 @@ xchk_quota_item(
 	 * if there are no quota limits.
 	 */
 	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
-		if (mp->m_sb.sb_dblocks < bcount)
+		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
 			xchk_fblock_set_warning(sc, XFS_DATA_FORK,
 					offset);
 	} else {
-		if (mp->m_sb.sb_dblocks < bcount)
+		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
 			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
 					offset);
 	}
-	if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks)
+	if (dq->q_ino.count > fs_icount || dq->q_rtb.count > mp->m_sb.sb_rblocks)
 		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

 	/*
@@ -163,15 +157,15 @@ xchk_quota_item(
 		goto out;

 	if (dq->q_blk.hardlimit != 0 &&
-	    bcount > dq->q_blk.hardlimit)
+	    dq->q_blk.count > dq->q_blk.hardlimit)
 		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

 	if (dq->q_ino.hardlimit != 0 &&
-	    icount > dq->q_ino.hardlimit)
+	    dq->q_ino.count > dq->q_ino.hardlimit)
 		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

 	if (dq->q_rtb.hardlimit != 0 &&
-	    rcount > dq->q_rtb.hardlimit)
+	    dq->q_rtb.count > dq->q_rtb.hardlimit)
 		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

 out:
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 9116e6ad7e9e..dc3bfce7f28b 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -134,9 +134,9 @@ xfs_qm_adjust_dqtimers(

 	if (!d->d_btimer) {
 		if ((dq->q_blk.softlimit &&
-		     (be64_to_cpu(d->d_bcount) > dq->q_blk.softlimit)) ||
+		     (dq->q_blk.count > dq->q_blk.softlimit)) ||
 		    (dq->q_blk.hardlimit &&
-		     (be64_to_cpu(d->d_bcount) > dq->q_blk.hardlimit))) {
+		     (dq->q_blk.count > dq->q_blk.hardlimit))) {
 			d->d_btimer = cpu_to_be32(ktime_get_real_seconds() +
 					defq->btimelimit);
 		} else {
@@ -144,18 +144,18 @@ xfs_qm_adjust_dqtimers(
 		}
 	} else {
 		if ((!dq->q_blk.softlimit ||
-		     (be64_to_cpu(d->d_bcount) <= dq->q_blk.softlimit)) &&
+		     (dq->q_blk.count <= dq->q_blk.softlimit)) &&
 		    (!dq->q_blk.hardlimit ||
-		     (be64_to_cpu(d->d_bcount) <= dq->q_blk.hardlimit))) {
+		     (dq->q_blk.count <= dq->q_blk.hardlimit))) {
 			d->d_btimer = 0;
 		}
 	}

 	if (!d->d_itimer) {
 		if ((dq->q_ino.softlimit &&
-		     (be64_to_cpu(d->d_icount) > dq->q_ino.softlimit)) ||
+		     (dq->q_ino.count > dq->q_ino.softlimit)) ||
 		    (dq->q_ino.hardlimit &&
-		     (be64_to_cpu(d->d_icount) > dq->q_ino.hardlimit))) {
+		     (dq->q_ino.count > dq->q_ino.hardlimit))) {
 			d->d_itimer = cpu_to_be32(ktime_get_real_seconds() +
 					defq->itimelimit);
 		} else {
@@ -163,18 +163,18 @@ xfs_qm_adjust_dqtimers(
 		}
 	} else {
 		if ((!dq->q_ino.softlimit ||
-		     (be64_to_cpu(d->d_icount) <= dq->q_ino.softlimit)) &&
+		     (dq->q_ino.count <= dq->q_ino.softlimit)) &&
 		    (!dq->q_ino.hardlimit ||
-		     (be64_to_cpu(d->d_icount) <= dq->q_ino.hardlimit))) {
+		     (dq->q_ino.count <= dq->q_ino.hardlimit))) {
 			d->d_itimer = 0;
 		}
 	}

 	if (!d->d_rtbtimer) {
 		if ((dq->q_rtb.softlimit &&
-		     (be64_to_cpu(d->d_rtbcount) > dq->q_rtb.softlimit)) ||
+		     (dq->q_rtb.count > dq->q_rtb.softlimit)) ||
 		    (dq->q_rtb.hardlimit &&
-		     (be64_to_cpu(d->d_rtbcount) > dq->q_rtb.hardlimit))) {
+		     (dq->q_rtb.count > dq->q_rtb.hardlimit))) {
 			d->d_rtbtimer = cpu_to_be32(ktime_get_real_seconds() +
 					defq->rtbtimelimit);
 		} else {
@@ -182,9 +182,9 @@ xfs_qm_adjust_dqtimers(
 		}
 	} else {
 		if ((!dq->q_rtb.softlimit ||
-		     (be64_to_cpu(d->d_rtbcount) <= dq->q_rtb.softlimit)) &&
+		     (dq->q_rtb.count <= dq->q_rtb.softlimit)) &&
 		    (!dq->q_rtb.hardlimit ||
-		     (be64_to_cpu(d->d_rtbcount) <= dq->q_rtb.hardlimit))) {
+		     (dq->q_rtb.count <= dq->q_rtb.hardlimit))) {
 			d->d_rtbtimer = 0;
 		}
 	}
@@ -538,13 +538,17 @@ xfs_dquot_from_disk(
 	dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
 	dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

+	dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount);
+	dqp->q_ino.count = be64_to_cpu(ddqp->d_icount);
+	dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount);
+
 	/*
 	 * Reservation counters are defined as reservation plus current usage
 	 * to avoid having to add every time.
 	 */
-	dqp->q_blk.reserved = be64_to_cpu(ddqp->d_bcount);
-	dqp->q_ino.reserved = be64_to_cpu(ddqp->d_icount);
-	dqp->q_rtb.reserved = be64_to_cpu(ddqp->d_rtbcount);
+	dqp->q_blk.reserved = dqp->q_blk.count;
+	dqp->q_ino.reserved = dqp->q_ino.count;
+	dqp->q_rtb.reserved = dqp->q_rtb.count;

 	/* initialize the dquot speculative prealloc thresholds */
 	xfs_dquot_set_prealloc_limits(dqp);
@@ -564,6 +568,10 @@ xfs_dquot_to_disk(
 	ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit);
 	ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit);
 	ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit);
+
+	ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count);
+	ddqp->d_icount = cpu_to_be64(dqp->q_ino.count);
+	ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count);
 }

 /* Allocate and initialize the dquot buffer for this in-core dquot. */
@@ -1136,18 +1144,15 @@ xfs_qm_dqflush_check(
 	if (dqp->q_id == 0)
 		return NULL;

-	if (dqp->q_blk.softlimit &&
-	    be64_to_cpu(ddq->d_bcount) > dqp->q_blk.softlimit &&
+	if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit &&
 	    !ddq->d_btimer)
 		return __this_address;

-	if (dqp->q_ino.softlimit &&
-	    be64_to_cpu(ddq->d_icount) > dqp->q_ino.softlimit &&
+	if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit &&
 	    !ddq->d_itimer)
 		return __this_address;

-	if (dqp->q_rtb.softlimit &&
-	    be64_to_cpu(ddq->d_rtbcount) > dqp->q_rtb.softlimit &&
+	if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit &&
 	    !ddq->d_rtbtimer)
 		return __this_address;

diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index 59790140fcb6..f5f0a15c0f7b 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -31,6 +31,9 @@ struct xfs_dquot_res {
 	/* Total resources allocated and reserved. */
 	xfs_qcnt_t		reserved;

+	/* Total resources allocated. */
+	xfs_qcnt_t		count;
+
 	/* Absolute and preferred limits. */
 	xfs_qcnt_t		hardlimit;
 	xfs_qcnt_t		softlimit;
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 83ae59536b2b..d6df2a4ca4ca 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1089,14 +1089,14 @@ xfs_qm_quotacheck_dqadjust(
 	 * Adjust the inode count and the block count to reflect this inode's
 	 * resource usage.
 	 */
-	be64_add_cpu(&dqp->q_core.d_icount, 1);
+	dqp->q_ino.count++;
 	dqp->q_ino.reserved++;
 	if (nblks) {
-		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
+		dqp->q_blk.count += nblks;
 		dqp->q_blk.reserved += nblks;
 	}
 	if (rtblks) {
-		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
+		dqp->q_rtb.count += rtblks;
 		dqp->q_rtb.reserved += rtblks;
 	}

diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index eb5fb7d9d995..57bddadbc051 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -26,9 +26,9 @@ extern struct kmem_zone *xfs_qm_dqtrxzone;
 	 !dqp->q_rtb.softlimit && \
 	 !dqp->q_ino.hardlimit && \
 	 !dqp->q_ino.softlimit && \
-	 !dqp->q_core.d_bcount && \
-	 !dqp->q_core.d_rtbcount && \
-	 !dqp->q_core.d_icount)
+	 !dqp->q_blk.count && \
+	 !dqp->q_rtb.count && \
+	 !dqp->q_ino.count)

 /* Defaults for each quota type: time limits, warn limits, usage limits */
 struct xfs_def_quota {
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 93fe31a22ce8..f0c2bce69a36 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -880,8 +880,8 @@ DECLARE_EVENT_CLASS(xfs_dquot_class,
 		__entry->flags = dqp->dq_flags | dqp->q_flags;
 		__entry->nrefs = dqp->q_nrefs;
 		__entry->res_bcount = dqp->q_blk.reserved;
-		__entry->bcount = be64_to_cpu(dqp->q_core.d_bcount);
-		__entry->icount = be64_to_cpu(dqp->q_core.d_icount);
+		__entry->bcount = dqp->q_blk.count;
+		__entry->icount = dqp->q_ino.count;
 		__entry->blk_hardlimit = dqp->q_blk.hardlimit;
 		__entry->blk_softlimit = dqp->q_blk.softlimit;
 		__entry->ino_hardlimit = dqp->q_ino.hardlimit;
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 0d6a69a81a58..1c2a45989aaf 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -309,7 +309,6 @@ xfs_trans_apply_dquot_deltas(
 	int			i, j;
 	struct xfs_dquot	*dqp;
 	struct xfs_dqtrx	*qtrx, *qa;
-	struct xfs_disk_dquot	*d;
 	int64_t			totalbdelta;
 	int64_t			totalrtbdelta;

@@ -341,7 +340,6 @@ xfs_trans_apply_dquot_deltas(
 			/*
 			 * adjust the actual number of blocks used
 			 */
-			d = &dqp->q_core;

 			/*
 			 * The issue here is - sometimes we don't make a blkquota
@@ -362,25 +360,22 @@ xfs_trans_apply_dquot_deltas(
 				qtrx->qt_delrtb_delta;
 #ifdef DEBUG
 			if (totalbdelta < 0)
-				ASSERT(be64_to_cpu(d->d_bcount) >=
-				       -totalbdelta);
+				ASSERT(dqp->q_blk.count >= -totalbdelta);

 			if (totalrtbdelta < 0)
-				ASSERT(be64_to_cpu(d->d_rtbcount) >=
-				       -totalrtbdelta);
+				ASSERT(dqp->q_rtb.count >= -totalrtbdelta);

 			if (qtrx->qt_icount_delta < 0)
-				ASSERT(be64_to_cpu(d->d_icount) >=
-				       -qtrx->qt_icount_delta);
+				ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
 #endif
 			if (totalbdelta)
-				be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);
+				dqp->q_blk.count += totalbdelta;

 			if (qtrx->qt_icount_delta)
-				be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);
+				dqp->q_ino.count += qtrx->qt_icount_delta;

 			if (totalrtbdelta)
-				be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);
+				dqp->q_rtb.count += totalrtbdelta;

 			/*
 			 * Get any default limits in use.
@@ -467,12 +462,9 @@ xfs_trans_apply_dquot_deltas(
 					(xfs_qcnt_t)qtrx->qt_icount_delta;
 			}

-			ASSERT(dqp->q_blk.reserved >=
-				be64_to_cpu(dqp->q_core.d_bcount));
-			ASSERT(dqp->q_ino.reserved >=
-				be64_to_cpu(dqp->q_core.d_icount));
-			ASSERT(dqp->q_rtb.reserved >=
-				be64_to_cpu(dqp->q_core.d_rtbcount));
+			ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
+			ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
+			ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
 		}
 	}
 }
@@ -675,7 +667,7 @@ xfs_trans_dqresv(

 	/*
 	 * Change the reservation, but not the actual usage.
-	 * Note that q_blk.reserved = q_core.d_bcount + resv
+	 * Note that q_blk.reserved = q_blk.count + resv
 	 */
 	(*resbcountp) += (xfs_qcnt_t)nblks;
 	if (ninos != 0)
@@ -700,9 +692,9 @@ xfs_trans_dqresv(
 				    XFS_TRANS_DQ_RES_INOS,
 				    ninos);
 	}
-	ASSERT(dqp->q_blk.reserved >= be64_to_cpu(dqp->q_core.d_bcount));
-	ASSERT(dqp->q_rtb.reserved >= be64_to_cpu(dqp->q_core.d_rtbcount));
-	ASSERT(dqp->q_ino.reserved >= be64_to_cpu(dqp->q_core.d_icount));
+	ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
+	ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
+	ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);

 	xfs_dqunlock(dqp);
 	return 0;
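The ASSERTs in xfs_trans_dqresv() and xfs_trans_apply_dquot_deltas() above rely on the invariant restated in the updated comment: q_blk.reserved holds the current usage plus any outstanding reservations, so reserved can never drop below count. The snippet below is a rough userspace sketch of that reserve-then-commit flow, not the kernel implementation; the names res_reserve and res_commit are invented for the example.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct res {
	uint64_t reserved;	/* usage plus outstanding reservations */
	uint64_t count;		/* usage actually allocated */
	uint64_t hardlimit;
};

/* Reserve nblks up front, failing if the hard limit would be exceeded. */
static int res_reserve(struct res *r, uint64_t nblks)
{
	if (r->hardlimit && r->reserved + nblks > r->hardlimit)
		return -1;	/* the kernel would return -EDQUOT here */
	r->reserved += nblks;
	return 0;
}

/* At commit time the reserved blocks become real usage. */
static void res_commit(struct res *r, uint64_t nblks)
{
	r->count += nblks;
	assert(r->reserved >= r->count);	/* the invariant the ASSERTs check */
}

int main(void)
{
	struct res blk = { .reserved = 50, .count = 50, .hardlimit = 100 };

	if (res_reserve(&blk, 20) == 0)
		res_commit(&blk, 20);
	printf("count=%llu reserved=%llu\n",
	       (unsigned long long)blk.count,
	       (unsigned long long)blk.reserved);
	return 0;
}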