author     Darrick J. Wong <djwong@kernel.org>   2024-02-22 12:30:48 -0800
committer  Darrick J. Wong <djwong@kernel.org>   2024-02-22 12:30:48 -0800
commit     82334a79c6eb1c18b4b38285cf2693bbc5db3933 (patch)
tree       29626f6f3ccb602bb9a06e7fbeb7ee642161d2fc /fs/xfs/scrub/trace.h
parent     a7a686cb07203fc42a38e66324241b7f2fe4fae2 (diff)
xfs: iscan batching should handle unallocated inodes too
The inode scanner tries to reduce contention on the AGI header buffer lock by grabbing references to consecutive allocated inodes. Batching stops as soon as we encounter an unallocated inode. This is unfortunate because in the worst case performance collapses to the old "one at a time" behavior if every other inode is free.

This is correct behavior, but we could do better. Unallocated inodes by definition have nothing to scan, which means the iscan can ignore them as long as someone ensures that the scan data will reflect another thread allocating the inode and adding interesting metadata to that inode. That mechanism is, of course, the live update hooks.

Therefore, extend the batching mechanism to track unallocated inodes adjacent to the scan cursor. The _want_live_update predicate can tell the caller's live update hook to incorporate all live updates to what the scanner thinks is an unallocated inode if (after dropping the AGI) some other thread allocates one of those inodes and begins using it.

Note that we cannot just copy the ir_free bitmap into the scan cursor because the batching stops if iget says the inode is in an intermediate state (e.g. on the inactivation list) and cannot be igrabbed.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
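To make the skip-mask idea concrete, here is a minimal sketch of the predicate logic described above. It assumes only the __batch_ino and __skipped_inomask fields visible in the diff below; the helper name is hypothetical and this is not the actual kernel implementation.

/*
 * Hypothetical sketch, not the kernel's implementation: decide whether
 * @ino was seen as unallocated by the current batch.  Skipped inodes
 * are recorded in __skipped_inomask as a bit offset from __batch_ino
 * (XFS_INODES_PER_CHUNK is 64, matching the 64-bit mask), so a live
 * update for one of them -- i.e. another thread allocated the inode
 * after we dropped the AGI -- must still reach the scan data.
 */
static bool example_iscan_ino_skipped(const struct xchk_iscan *iscan,
				      xfs_ino_t ino)
{
	if (iscan->__batch_ino == NULLFSINO)
		return false;
	if (ino < iscan->__batch_ino ||
	    ino >= iscan->__batch_ino + XFS_INODES_PER_CHUNK)
		return false;

	return iscan->__skipped_inomask & (1ULL << (ino - iscan->__batch_ino));
}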
Diffstat (limited to 'fs/xfs/scrub/trace.h')
-rw-r--r--  fs/xfs/scrub/trace.h  21
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index 38d3356466cd..829c90da59c7 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -1172,6 +1172,7 @@ DEFINE_EVENT(xchk_iscan_class, name, \
TP_ARGS(iscan))
DEFINE_ISCAN_EVENT(xchk_iscan_move_cursor);
DEFINE_ISCAN_EVENT(xchk_iscan_visit);
+DEFINE_ISCAN_EVENT(xchk_iscan_skip);
DEFINE_ISCAN_EVENT(xchk_iscan_advance_ag);
DECLARE_EVENT_CLASS(xchk_iscan_ino_class,
@@ -1229,25 +1230,37 @@ TRACE_EVENT(xchk_iscan_iget,
TRACE_EVENT(xchk_iscan_iget_batch,
TP_PROTO(struct xfs_mount *mp, struct xchk_iscan *iscan,
- unsigned int nr),
- TP_ARGS(mp, iscan, nr),
+ unsigned int nr, unsigned int avail),
+ TP_ARGS(mp, iscan, nr, avail),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, cursor)
__field(xfs_ino_t, visited)
__field(unsigned int, nr)
+ __field(unsigned int, avail)
+ __field(unsigned int, unavail)
+ __field(xfs_ino_t, batch_ino)
+ __field(unsigned long long, skipmask)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->cursor = iscan->cursor_ino;
__entry->visited = iscan->__visited_ino;
__entry->nr = nr;
+ __entry->avail = avail;
+ __entry->unavail = hweight64(iscan->__skipped_inomask);
+ __entry->batch_ino = iscan->__batch_ino;
+ __entry->skipmask = iscan->__skipped_inomask;
),
- TP_printk("dev %d:%d iscan cursor 0x%llx visited 0x%llx nr %d",
+ TP_printk("dev %d:%d iscan cursor 0x%llx visited 0x%llx batchino 0x%llx skipmask 0x%llx nr %u avail %u unavail %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->cursor,
__entry->visited,
- __entry->nr)
+ __entry->batch_ino,
+ __entry->skipmask,
+ __entry->nr,
+ __entry->avail,
+ __entry->unavail)
);
TRACE_EVENT(xchk_iscan_iget_retry_wait,
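For context, TRACE_EVENT() generates a trace_<name>() function matching TP_PROTO, so with this patch the batch tracepoint gains the extra avail argument at its call site. A hedged sketch of the invocation follows; the surrounding code is illustrative only, the signature comes from the TP_PROTO above.

/*
 * Illustrative call site: trace_xchk_iscan_iget_batch() is generated
 * from the TRACE_EVENT above.  @nr is the number of inodes igrabbed
 * for the batch and @avail the number available in the record; the
 * unavail count and skip mask are read out of the iscan cursor by
 * TP_fast_assign when the event fires.
 */
trace_xchk_iscan_iget_batch(mp, iscan, nr, avail);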