author     Stephen Bates <sbates@raithlin.com>    2017-04-20 15:29:16 -0600
committer  Jens Axboe <axboe@fb.com>              2017-04-20 15:29:16 -0600
commit     a37244e4cc5766af68004ad6249fcd0e4694b441
tree       6c5cf6437a62483f2be5f5c924fbfecc7b6d3c3d
parent     caf7df12272118e0274c8353bcfeaf60c7743a47
blk-stat: convert blk-stat bucket callback to signed
In order to allow filtering of IO based on properties of the request other
than its direction, allow the bucket function to return a signed int.

If the bucket callback returns a negative value, the request is not counted
in the stats accumulation.
Signed-off-by: Stephen Bates <sbates@raithlin.com>
Fixed up Kyber scheduler stat callback.
Signed-off-by: Jens Axboe <axboe@fb.com>
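
As a minimal sketch of the kind of filtering this enables (the function name
example_rw_only_bucket is hypothetical; rq->cmd_flags, REQ_OP_MASK,
REQ_OP_READ/WRITE and rq_data_dir() are existing block-layer definitions, as
used elsewhere in this patch), a bucket callback can now exclude requests it
is not interested in by returning -1:

/*
 * Bucket reads and writes by data direction, as blk_stat_rq_ddir() does,
 * but return -1 for everything else (e.g. discards, flushes) so that
 * blk_stat_add() skips those requests entirely.
 */
static int example_rw_only_bucket(const struct request *rq)
{
	unsigned int op = rq->cmd_flags & REQ_OP_MASK;

	if (op == REQ_OP_READ || op == REQ_OP_WRITE)
		return rq_data_dir(rq);	/* 0 = READ bucket, 1 = WRITE bucket */

	return -1;			/* negative: not accounted */
}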
-rw-r--r--   block/blk-stat.c      | 6
-rw-r--r--   block/blk-stat.h      | 9
-rw-r--r--   block/kyber-iosched.c | 2
3 files changed, 10 insertions, 7 deletions
diff --git a/block/blk-stat.c b/block/blk-stat.c
index e77ec52f5bb5..dde9d399f707 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -19,7 +19,7 @@ struct blk_queue_stats {
 	bool enable_accounting;
 };
 
-unsigned int blk_stat_rq_ddir(const struct request *rq)
+int blk_stat_rq_ddir(const struct request *rq)
 {
 	return rq_data_dir(rq);
 }
@@ -104,6 +104,8 @@ void blk_stat_add(struct request *rq)
 	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
 		if (blk_stat_is_active(cb)) {
 			bucket = cb->bucket_fn(rq);
+			if (bucket < 0)
+				continue;
 			stat = &this_cpu_ptr(cb->cpu_stat)[bucket];
 			__blk_stat_add(stat, value);
 		}
@@ -135,7 +137,7 @@ static void blk_stat_timer_fn(unsigned long data)
 
 struct blk_stat_callback *
 blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
-			unsigned int (*bucket_fn)(const struct request *),
+			int (*bucket_fn)(const struct request *),
 			unsigned int buckets, void *data)
 {
 	struct blk_stat_callback *cb;
diff --git a/block/blk-stat.h b/block/blk-stat.h
index 53f08a63bf15..622a62ce6213 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -48,9 +48,10 @@ struct blk_stat_callback {
 
 	/**
 	 * @bucket_fn: Given a request, returns which statistics bucket it
-	 * should be accounted under.
+	 * should be accounted under. Return -1 for no bucket for this
+	 * request.
 	 */
-	unsigned int (*bucket_fn)(const struct request *);
+	int (*bucket_fn)(const struct request *);
 
 	/**
 	 * @buckets: Number of statistics buckets.
@@ -120,7 +121,7 @@ void blk_stat_enable_accounting(struct request_queue *q);
  *
  * Return: Data direction of the request, either READ or WRITE.
  */
-unsigned int blk_stat_rq_ddir(const struct request *rq);
+int blk_stat_rq_ddir(const struct request *rq);
 
 /**
  * blk_stat_alloc_callback() - Allocate a block statistics callback.
@@ -135,7 +136,7 @@ unsigned int blk_stat_rq_ddir(const struct request *rq);
  */
 struct blk_stat_callback *
 blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
-			unsigned int (*bucket_fn)(const struct request *),
+			int (*bucket_fn)(const struct request *),
 			unsigned int buckets, void *data);
 
 /**
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index fe4af5b97c0e..3b0090bc5dd1 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -102,7 +102,7 @@ struct kyber_hctx_data {
 	atomic_t wait_index[KYBER_NUM_DOMAINS];
 };
 
-static unsigned int rq_sched_domain(const struct request *rq)
+static int rq_sched_domain(const struct request *rq)
 {
 	unsigned int op = rq->cmd_flags;
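
For reference, a caller would pass such a filtering function in the (now
signed) bucket_fn argument of blk_stat_alloc_callback(). A rough sketch of
the registration path, assuming the hypothetical names above
(example_rw_only_bucket, example_register_stats) and a placeholder timer
callback my_timer_fn; blk_stat_alloc_callback() and blk_stat_add_callback()
are the existing blk-stat interfaces touched by this patch:

/*
 * Rough sketch: allocate a two-bucket callback (READ and WRITE) using the
 * filtering bucket function above, then attach it to the queue.  Requests
 * bucketed as -1 are simply skipped by blk_stat_add().
 */
static int example_register_stats(struct request_queue *q)
{
	struct blk_stat_callback *cb;

	cb = blk_stat_alloc_callback(my_timer_fn, example_rw_only_bucket,
				     2, NULL);
	if (!cb)
		return -ENOMEM;

	blk_stat_add_callback(q, cb);
	return 0;
}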