author		Joe Thornber <ejt@redhat.com>	2017-11-09 11:41:05 -0500
committer	Mike Snitzer <snitzer@redhat.com>	2017-11-10 15:45:05 -0500
commit		9768a10dd35c6bca9ea58ae23bd5d5c2500d7005 (patch)
tree		7ebf9e28e29a74d0f230b9fc8517584e647aeeda
parent		8ee18ede74328906b692403fadb2658cf56f26b3 (diff)
dm cache policy smq: allocate cache blocks in order
Previously, cache blocks were being allocated in reverse order.  Fix
this by pulling the block off the head of the free list.

Shouldn't have any impact on performance or latency but it is more
correct to have the cache blocks allocated/mapped in ascending order.
This fix will slightly increase the chances of two adjacent oblocks
being in adjacent cblocks.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
 drivers/md/dm-cache-policy-smq.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 58be846ba5b9..4ab23d0075f6 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -213,6 +213,19 @@ static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
 	l->nr_elts--;
 }
 
+static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
+{
+	struct entry *e;
+
+	for (e = l_head(es, l); e; e = l_next(es, e))
+		if (!e->sentinel) {
+			l_del(es, l, e);
+			return e;
+		}
+
+	return NULL;
+}
+
 static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
 {
 	struct entry *e;
@@ -719,7 +732,7 @@ static struct entry *alloc_entry(struct entry_alloc *ea)
 	if (l_empty(&ea->free))
 		return NULL;
 
-	e = l_pop_tail(ea->es, &ea->free);
+	e = l_pop_head(ea->es, &ea->free);
 	init_entry(e);
 	ea->nr_allocated++;
 
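As a standalone illustration of the commit's reasoning (not taken from the dm-cache code; struct toy_entry, build_free_list() and pop_head() are hypothetical stand-ins for the kernel's entry_space/ilist machinery), the sketch below shows why popping from the head of a free list that was linked in ascending index order hands out indices 0, 1, 2, ..., whereas popping from the tail, as the old alloc_entry() path effectively did, hands them out in reverse.

/*
 * Minimal sketch, assuming a free list built in ascending index order.
 * Compile with any C compiler; prints "0 1 2 3".
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_entry {
	unsigned index;
	struct toy_entry *next;
};

/* Build a free list whose entries are linked in ascending index order. */
static struct toy_entry *build_free_list(unsigned nr)
{
	struct toy_entry *head = NULL, **tailp = &head;
	unsigned i;

	for (i = 0; i < nr; i++) {
		struct toy_entry *e = malloc(sizeof(*e));
		e->index = i;
		e->next = NULL;
		*tailp = e;
		tailp = &e->next;
	}
	return head;
}

/* Pop from the head: entries come back in ascending order. */
static struct toy_entry *pop_head(struct toy_entry **list)
{
	struct toy_entry *e = *list;
	if (e)
		*list = e->next;
	return e;
}

int main(void)
{
	struct toy_entry *free_list = build_free_list(4);
	struct toy_entry *e;

	/* Head allocation yields 0 1 2 3; tail allocation would yield 3 2 1 0. */
	while ((e = pop_head(&free_list))) {
		printf("%u ", e->index);
		free(e);
	}
	printf("\n");
	return 0;
}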