author    Chris Mason <chris.mason@oracle.com>    2008-07-22 11:18:09 -0400
committer Chris Mason <chris.mason@oracle.com>    2008-09-25 11:04:05 -0400
commit    f421950f86bf96a11fef932e167ab2e70d4c43a0 (patch)
tree      a2b62b942b023e37b6aae39891c2b314d8d8a3fb /fs/btrfs/extent_io.c
parent    a61e6f29dc7c9d56a776a518eed92bbc61848263 (diff)
Btrfs: Fix some data=ordered related data corruptions
Stress testing was showing data checksum errors, most of which were caused
by a lookup bug in the extent_map tree. The tree was caching the last
pointer returned, and searches would check the last pointer first.

But, search callers also expect the search to return the very first
matching extent in the range, which wasn't always true with the last
pointer usage. For now, the code to cache the last return value is just
removed. It is easy to fix, but I think lookups are rare enough that it
isn't required anymore.

This commit also replaces do_sync_mapping_range with a local copy of the
related functions.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
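To make the lookup bug concrete, here is a minimal user-space sketch of the
failure mode, not btrfs code: the demo_extent array, the lookup_cached()
helper, and the two hard-coded extents are all hypothetical stand-ins for
the rb-tree based extent_map lookups. It shows how a cached "last hit" can
satisfy a range query even though an earlier extent also overlaps the
range, so the caller no longer gets the first matching extent.

/*
 * Standalone sketch (assumed/simplified, not kernel code) of the pitfall
 * described above: caching the last entry returned and checking it first
 * answers "some entry overlapping the range" while the caller expects
 * "the FIRST entry overlapping the range".
 */
#include <stdio.h>
#include <stddef.h>

struct demo_extent {
	unsigned long long start;
	unsigned long long end;		/* inclusive */
};

/* Two extents, sorted by start offset, both overlapping [0, 200]. */
static struct demo_extent extents[] = {
	{ .start = 0,   .end = 99  },
	{ .start = 100, .end = 199 },
};

static struct demo_extent *cached_last;

/* Buggy variant: trust the cached entry if it overlaps the range at all. */
static struct demo_extent *lookup_cached(unsigned long long start,
					 unsigned long long end)
{
	size_t i;

	if (cached_last && cached_last->start <= end &&
	    start <= cached_last->end)
		return cached_last;	/* may not be the first match */

	for (i = 0; i < sizeof(extents) / sizeof(extents[0]); i++) {
		if (extents[i].start <= end && start <= extents[i].end) {
			cached_last = &extents[i];
			return cached_last;
		}
	}
	return NULL;
}

int main(void)
{
	struct demo_extent *e;

	/* A point lookup warms the cache with the second extent... */
	lookup_cached(150, 150);

	/* ...so a later range lookup returns it instead of extent 0. */
	e = lookup_cached(0, 200);
	printf("range [0,200] -> extent starting at %llu (expected 0)\n",
	       e ? e->start : ~0ULL);
	return 0;
}

Compiled with a stock C compiler, the sketch prints an extent starting at
100 for the range [0,200] even though the first overlapping extent starts
at 0, which mirrors the mismatch the commit removes the cache to avoid.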
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--   fs/btrfs/extent_io.c   20
1 file changed, 0 insertions, 20 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7380449cb5b3..9965993748d0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -97,7 +97,6 @@ void extent_io_tree_init(struct extent_io_tree *tree,
 	spin_lock_init(&tree->lock);
 	spin_lock_init(&tree->buffer_lock);
 	tree->mapping = mapping;
-	tree->last = NULL;
 }
 EXPORT_SYMBOL(extent_io_tree_init);
@@ -173,12 +172,6 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 	struct tree_entry *entry;
 	struct tree_entry *prev_entry = NULL;
-	if (tree->last) {
-		struct extent_state *state;
-		state = tree->last;
-		if (state->start <= offset && offset <= state->end)
-			return &tree->last->rb_node;
-	}
 	while(n) {
 		entry = rb_entry(n, struct tree_entry, rb_node);
 		prev = n;
@@ -189,7 +182,6 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 		else if (offset > entry->end)
 			n = n->rb_right;
 		else {
-			tree->last = rb_entry(n, struct extent_state, rb_node);
 			return n;
 		}
 	}
@@ -223,10 +215,6 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree,
 	ret = __etree_search(tree, offset, &prev, NULL);
 	if (!ret) {
-		if (prev) {
-			tree->last = rb_entry(prev, struct extent_state,
-					      rb_node);
-		}
 		return prev;
 	}
 	return ret;
@@ -301,8 +289,6 @@ static int merge_state(struct extent_io_tree *tree,
 		    other->state == state->state) {
 			state->start = other->start;
 			other->tree = NULL;
-			if (tree->last == other)
-				tree->last = state;
 			rb_erase(&other->rb_node, &tree->state);
 			free_extent_state(other);
 		}
@@ -314,8 +300,6 @@ static int merge_state(struct extent_io_tree *tree,
 		    other->state == state->state) {
 			other->start = state->start;
 			state->tree = NULL;
-			if (tree->last == state)
-				tree->last = other;
 			rb_erase(&state->rb_node, &tree->state);
 			free_extent_state(state);
 		}
@@ -378,7 +362,6 @@ static int insert_state(struct extent_io_tree *tree,
 		return -EEXIST;
 	}
 	state->tree = tree;
-	tree->last = state;
 	merge_state(tree, state);
 	return 0;
 }
@@ -444,9 +427,6 @@ static int clear_state_bit(struct extent_io_tree *tree,
 	if (delete || state->state == 0) {
 		if (state->tree) {
 			clear_state_cb(tree, state, state->state);
-			if (tree->last == state) {
-				tree->last = extent_state_next(state);
-			}
 			rb_erase(&state->rb_node, &tree->state);
 			state->tree = NULL;
 			free_extent_state(state);