author		Mike Snitzer <snitzer@redhat.com>	2015-06-26 09:42:57 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2015-06-26 10:11:07 -0400
commit		4e6e36c3714364b65f2bfea8c73691c663891726 (patch)
tree		e5fd2763c7d873bf70457e447922060dd8446223
parent		e262f34741522e0d821642e5449c6eeb512723fc (diff)
Revert "dm: do not allocate any mempools for blk-mq request-based DM"
This reverts commit cbc4e3c1350beb47beab8f34ad9be3d34a20c705.
Reported-by: Junichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r--	drivers/md/dm-table.c	 4
-rw-r--r--	drivers/md/dm.c		69
2 files changed, 33 insertions, 40 deletions
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 85e1d39e9a38..a5f94125ad01 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -964,8 +964,8 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
 		return -EINVAL;
 	}
 
-	if (IS_ERR(t->mempools))
-		return PTR_ERR(t->mempools);
+	if (!t->mempools)
+		return -ENOMEM;
 
 	return 0;
 }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 90dc49e3c78f..492181e16c69 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2349,52 +2349,39 @@ static void free_dev(struct mapped_device *md)
 	kfree(md);
 }
 
-static unsigned filter_md_type(unsigned type, struct mapped_device *md)
-{
-	if (type == DM_TYPE_BIO_BASED)
-		return type;
-
-	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
-}
-
 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
 
-	switch (filter_md_type(dm_table_get_type(t), md)) {
-	case DM_TYPE_BIO_BASED:
-		if (md->bs && md->io_pool) {
+	if (md->bs) {
+		/* The md already has necessary mempools. */
+		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
 			/*
-			 * This bio-based md already has necessary mempools.
 			 * Reload bioset because front_pad may have changed
 			 * because a different table was loaded.
 			 */
 			bioset_free(md->bs);
 			md->bs = p->bs;
 			p->bs = NULL;
-			goto out;
 		}
-		break;
-	case DM_TYPE_REQUEST_BASED:
-		if (md->rq_pool && md->io_pool)
-			/*
-			 * This request-based md already has necessary mempools.
-			 */
-			goto out;
-		break;
-	case DM_TYPE_MQ_REQUEST_BASED:
-		BUG_ON(p); /* No mempools needed */
-		return;
+		/*
+		 * There's no need to reload with request-based dm
+		 * because the size of front_pad doesn't change.
+		 * Note for future: If you are to reload bioset,
+		 * prep-ed requests in the queue may refer
+		 * to bio from the old bioset, so you must walk
+		 * through the queue to unprep.
+		 */
+		goto out;
 	}
 
-	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
-
 	md->io_pool = p->io_pool;
 	p->io_pool = NULL;
 	md->rq_pool = p->rq_pool;
 	p->rq_pool = NULL;
 	md->bs = p->bs;
 	p->bs = NULL;
+
 out:
 	/* mempool bind completed, no longer need any mempools in the table */
 	dm_table_free_md_mempools(t);
@@ -2774,6 +2761,14 @@ out_tag_set:
 	return err;
 }
 
+static unsigned filter_md_type(unsigned type, struct mapped_device *md)
+{
+	if (type == DM_TYPE_BIO_BASED)
+		return type;
+
+	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
+}
+
 /*
  * Setup the DM device's queue based on md's type
  */
@@ -3495,7 +3490,7 @@ struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
 
 	pools = kzalloc(sizeof(*pools), GFP_KERNEL);
 	if (!pools)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) +
 		offsetof(struct dm_target_io, clone);
@@ -3514,26 +3509,24 @@ struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
 	return pools;
 out:
 	dm_free_md_mempools(pools);
-	return ERR_PTR(-ENOMEM);
+	return NULL;
 }
 
 struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
 					    unsigned type)
 {
-	unsigned int pool_size;
+	unsigned int pool_size = dm_get_reserved_rq_based_ios();
 	struct dm_md_mempools *pools;
 
-	if (filter_md_type(type, md) == DM_TYPE_MQ_REQUEST_BASED)
-		return NULL; /* No mempools needed */
-
-	pool_size = dm_get_reserved_rq_based_ios();
 	pools = kzalloc(sizeof(*pools), GFP_KERNEL);
 	if (!pools)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
-	pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
-	if (!pools->rq_pool)
-		goto out;
+	if (filter_md_type(type, md) == DM_TYPE_REQUEST_BASED) {
+		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
+		if (!pools->rq_pool)
+			goto out;
+	}
 
 	pools->io_pool = mempool_create_slab_pool(pool_size, _rq_tio_cache);
 	if (!pools->io_pool)
@@ -3542,7 +3535,7 @@ struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
 	return pools;
 out:
 	dm_free_md_mempools(pools);
-	return ERR_PTR(-ENOMEM);
+	return NULL;
 }
 
 void dm_free_md_mempools(struct dm_md_mempools *pools)
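Note on the error-reporting change folded into this revert: before the revert the mempool allocators reported failure with ERR_PTR(-ENOMEM) and dm_table_alloc_md_mempools() decoded it with IS_ERR()/PTR_ERR(); after the revert they return plain NULL and the caller maps that back to -ENOMEM. The stand-alone user-space sketch below contrasts the two conventions; it is only an illustration, not kernel code. The ERR_PTR()/IS_ERR()/PTR_ERR() helpers merely mimic <linux/err.h>, and alloc_pools_errptr()/alloc_pools_null()/bind_errptr()/bind_null() are hypothetical stand-ins for dm_alloc_bio_mempools() and its caller, not functions from this patch.

/*
 * Minimal user-space sketch contrasting the two error conventions this
 * revert switches between.  The helpers below only mimic <linux/err.h>.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Mimic the kernel's encoding of small negative errnos in a pointer. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct dm_md_mempools { int dummy; };

/* Pre-revert style: failure is reported as an ERR_PTR()-encoded errno. */
static struct dm_md_mempools *alloc_pools_errptr(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return calloc(1, sizeof(struct dm_md_mempools));
}

/* Post-revert style: failure is reported as plain NULL. */
static struct dm_md_mempools *alloc_pools_null(int fail)
{
	if (fail)
		return NULL;
	return calloc(1, sizeof(struct dm_md_mempools));
}

/* Caller under the pre-revert scheme: decode and propagate the errno. */
static int bind_errptr(int fail)
{
	struct dm_md_mempools *p = alloc_pools_errptr(fail);

	if (IS_ERR(p))
		return PTR_ERR(p);
	free(p);
	return 0;
}

/* Caller under the post-revert scheme: NULL is mapped back to -ENOMEM. */
static int bind_null(int fail)
{
	struct dm_md_mempools *p = alloc_pools_null(fail);

	if (!p)
		return -ENOMEM;
	free(p);
	return 0;
}

int main(void)
{
	printf("ERR_PTR convention: ok=%d fail=%d\n", bind_errptr(0), bind_errptr(1));
	printf("NULL convention:    ok=%d fail=%d\n", bind_null(0), bind_null(1));
	return 0;
}

Both callers end up returning -ENOMEM on failure; the revert simply moves the choice of errno from the allocator back into the caller, which is what the dm-table.c hunk above restores.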