author      Linus Torvalds <torvalds@linux-foundation.org>    2020-10-14 15:05:38 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>    2020-10-14 15:05:38 -0700
commit      4815519ed0af833884ce9c288183bf1ae3cb9caa (patch)
tree        04ab0cdbe903165d7286811da64156dc2a523f73
parent      6e4dc3d59284ea3bc7c3e40694bce84d988b01af (diff)
parent      681cc5e8667e8579a2da8fa4090c48a2d73fc3bb (diff)
Merge tag 'for-5.10/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:
- Improve DM core's bio splitting to use blk_max_size_offset(). Also
  fix bio splitting for bios that were deferred to the worker thread
  due to a DM device being suspended (a sketch of the splitting
  arithmetic follows this list).
- Remove DM core's special handling of NVMe devices now that block core
has internalized efficiencies drivers previously needed to be
concerned about (via now removed direct_make_request).
- Fix request-based DM to not bounce through indirect dm_submit_bio;
  instead have block core make a direct call to blk_mq_submit_bio()
  (see the fops sketch after this list).
- Various DM core cleanups to simplify and improve code.
- Update DM crypt to not use drivers that set
  CRYPTO_ALG_ALLOCATES_MEMORY (see the allocation-mask sketch after
  this list).
- Fix DM raid's raid1 and raid10 discard limits for the purposes of
linux-stable. But then remove DM raid's discard limits settings now
that MD raid can efficiently handle large discards.
- A couple small cleanups across various targets.
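
The splitting rework in the first item folds each target's max_io_len into the
queue's chunk_sectors limit (see the dm-table.c and dm.c hunks below), so the
block core's blk_max_size_offset() can compute how much I/O fits at a given
offset. The following is a minimal standalone userspace sketch of that
arithmetic, not the kernel implementation; the helper name and sample values
are invented for illustration.

#include <stdio.h>

typedef unsigned long long sector_t;

/* Largest I/O allowed at `offset`: clipped to the distance to the next
 * chunk_sectors boundary and to the overall max_sectors cap -- the rule
 * blk_max_size_offset() applies once max_io_len is stacked into
 * chunk_sectors.
 */
static sector_t max_size_at_offset(sector_t offset, sector_t chunk_sectors,
                                   sector_t max_sectors)
{
    sector_t room;

    if (!chunk_sectors)         /* no boundary: only max_sectors applies */
        return max_sectors;

    room = chunk_sectors - (offset % chunk_sectors);
    return room < max_sectors ? room : max_sectors;
}

int main(void)
{
    /* e.g. a target whose max_io_len of 128 was stacked into chunk_sectors */
    printf("%llu\n", max_size_at_offset(100, 128, 256)); /* 28: stop at boundary */
    printf("%llu\n", max_size_at_offset(128, 128, 256)); /* 128: whole chunk fits */
    return 0;
}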
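For the request-based item: dm now registers a second block_device_operations
table that deliberately leaves .submit_bio unset, so the block core's
submission path falls through to blk_mq_submit_bio() for the dm-mq queue
instead of making an indirect call into DM (see dm_rq_blk_dops in the dm.c
hunks below). A hedged sketch of that shape; the example_* names are
hypothetical stand-ins, not DM symbols.

#include <linux/blkdev.h>
#include <linux/module.h>

static int example_open(struct block_device *bdev, fmode_t mode)
{
    return 0;
}

static void example_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations example_rq_fops = {
    /* no .submit_bio: block core dispatches via blk_mq_submit_bio() */
    .open    = example_open,
    .release = example_release,
    .owner   = THIS_MODULE,
};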
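For the DM crypt item: CRYPTO_ALG_ALLOCATES_MEMORY is passed in the mask
argument of the crypto allocation helpers, which makes the crypto API skip
drivers that may allocate memory per request (a deadlock risk when dm-crypt
sits in the writeback path). A minimal sketch of the pattern used by the
dm-crypt.c hunks below; alloc_writeback_safe_shash() is a hypothetical
helper name.

#include <crypto/hash.h>

static struct crypto_shash *alloc_writeback_safe_shash(const char *alg)
{
    /* type = 0; mask excludes drivers flagged CRYPTO_ALG_ALLOCATES_MEMORY */
    return crypto_alloc_shash(alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
}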
* tag 'for-5.10/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm: fix request-based DM to not bounce through indirect dm_submit_bio
dm: remove special-casing of bio-based immutable singleton target on NVMe
dm: export dm_copy_name_and_uuid
dm: fix comment in __dm_suspend()
dm: fold dm_process_bio() into dm_submit_bio()
dm: fix missing imposition of queue_limits from dm_wq_work() thread
dm snap persistent: simplify area_io()
dm thin metadata: Remove unused local variable when create thin and snap
dm raid: remove unnecessary discard limits for raid10
dm raid: fix discard limits for raid1 and raid10
dm crypt: don't use drivers that have CRYPTO_ALG_ALLOCATES_MEMORY
dm: use dm_table_get_device_name() where appropriate in targets
dm table: make 'struct dm_table' definition accessible to all of DM core
dm: eliminate need for start_io_acct() forward declaration
dm: simplify __process_abnormal_io()
dm: push use of on-stack flush_bio down to __send_empty_flush()
dm: optimize max_io_len() by inlining max_io_len_target_boundary()
dm: push md->immutable_target optimization down to __process_bio()
dm: change max_io_len() to use blk_max_size_offset()
dm table: stack 'chunk_sectors' limit to account for target-specific splitting
-rw-r--r--  block/blk-mq.c                         |   1
-rw-r--r--  drivers/md/dm-cache-target.c           |   2
-rw-r--r--  drivers/md/dm-core.h                   |  56
-rw-r--r--  drivers/md/dm-crypt.c                  |  17
-rw-r--r--  drivers/md/dm-ioctl.c                  |   2
-rw-r--r--  drivers/md/dm-mpath.c                  |  16
-rw-r--r--  drivers/md/dm-raid.c                   |   9
-rw-r--r--  drivers/md/dm-rq.c                     |   2
-rw-r--r--  drivers/md/dm-snap-persistent.c        |  11
-rw-r--r--  drivers/md/dm-table.c                  |  84
-rw-r--r--  drivers/md/dm-thin-metadata.c          |   6
-rw-r--r--  drivers/md/dm.c                        | 404
-rw-r--r--  drivers/md/dm.h                        |   3
-rw-r--r--  drivers/md/persistent-data/dm-btree.c  |   3
-rw-r--r--  include/linux/device-mapper.h          |   1
-rw-r--r--  include/uapi/linux/dm-ioctl.h          |   4
16 files changed, 224 insertions(+), 397 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index deca157032c2..696450257ac1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2270,7 +2270,6 @@ queue_exit:
     blk_queue_exit(q);
     return BLK_QC_T_NONE;
 }
-EXPORT_SYMBOL_GPL(blk_mq_submit_bio); /* only for request based dm */

 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
             unsigned int hctx_idx)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 96c93802ee4d..9644424591da 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -925,7 +925,7 @@ static enum cache_metadata_mode get_cache_mode(struct cache *cache)

 static const char *cache_device_name(struct cache *cache)
 {
-    return dm_device_name(dm_table_get_md(cache->ti->table));
+    return dm_table_device_name(cache->ti->table);
 }

 static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index c4ef1fceead6..d522093cb39d 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -11,6 +11,7 @@

 #include <linux/kthread.h>
 #include <linux/ktime.h>
+#include <linux/genhd.h>
 #include <linux/blk-mq.h>

 #include <trace/events/block.h>
@@ -25,9 +26,11 @@ struct dm_kobject_holder {
 };

 /*
- * DM core internal structure that used directly by dm.c and dm-rq.c
- * DM targets must _not_ deference a mapped_device to directly access its members!
+ * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
+ * DM targets must _not_ deference a mapped_device or dm_table to directly
+ * access their members!
  */
+
 struct mapped_device {
     struct mutex suspend_lock;

@@ -119,6 +122,55 @@ void disable_discard(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);

+static inline sector_t dm_get_size(struct mapped_device *md)
+{
+    return get_capacity(md->disk);
+}
+
+static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
+{
+    return &md->stats;
+}
+
+#define DM_TABLE_MAX_DEPTH 16
+
+struct dm_table {
+    struct mapped_device *md;
+    enum dm_queue_mode type;
+
+    /* btree table */
+    unsigned int depth;
+    unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
+    sector_t *index[DM_TABLE_MAX_DEPTH];
+
+    unsigned int num_targets;
+    unsigned int num_allocated;
+    sector_t *highs;
+    struct dm_target *targets;
+
+    struct target_type *immutable_target_type;
+
+    bool integrity_supported:1;
+    bool singleton:1;
+    unsigned integrity_added:1;
+
+    /*
+     * Indicates the rw permissions for the new logical
+     * device. This should be a combination of FMODE_READ
+     * and FMODE_WRITE.
+     */
+    fmode_t mode;
+
+    /* a list of devices used by this table */
+    struct list_head devices;
+
+    /* events get handed up using this callback */
+    void (*event_fn)(void *);
+    void *event_context;
+
+    struct dm_md_mempools *mempools;
+};
+
 static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
 {
     return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 380386c36921..392337f16ecf 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -424,7 +424,8 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
         return -EINVAL;
     }

-    lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
+    lmk->hash_tfm = crypto_alloc_shash("md5", 0,
+                       CRYPTO_ALG_ALLOCATES_MEMORY);
     if (IS_ERR(lmk->hash_tfm)) {
         ti->error = "Error initializing LMK hash";
         return PTR_ERR(lmk->hash_tfm);
@@ -586,7 +587,8 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
         return -EINVAL;
     }

-    tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
+    tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
+                        CRYPTO_ALG_ALLOCATES_MEMORY);
     if (IS_ERR(tcw->crc32_tfm)) {
         ti->error = "Error initializing CRC32 in TCW";
         return PTR_ERR(tcw->crc32_tfm);
@@ -773,7 +775,8 @@ static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
     struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
     int r;

-    elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+    elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
+                          CRYPTO_ALG_ALLOCATES_MEMORY);
     if (IS_ERR(elephant->tfm)) {
         r = PTR_ERR(elephant->tfm);
         elephant->tfm = NULL;
@@ -2154,7 +2157,8 @@ static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
         return -ENOMEM;

     for (i = 0; i < cc->tfms_count; i++) {
-        cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
+        cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
+                        CRYPTO_ALG_ALLOCATES_MEMORY);
         if (IS_ERR(cc->cipher_tfm.tfms[i])) {
             err = PTR_ERR(cc->cipher_tfm.tfms[i]);
             crypt_free_tfms(cc);
@@ -2180,7 +2184,8 @@ static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
     if (!cc->cipher_tfm.tfms)
         return -ENOMEM;

-    cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0);
+    cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
+                        CRYPTO_ALG_ALLOCATES_MEMORY);
     if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
         err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
         crypt_free_tfms(cc);
@@ -2667,7 +2672,7 @@ static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
         return -ENOMEM;
     strncpy(mac_alg, start, end - start);

-    mac = crypto_alloc_ahash(mac_alg, 0, 0);
+    mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
     kfree(mac_alg);

     if (IS_ERR(mac))
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 28122e850ea1..cd0478d44058 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -2044,7 +2044,7 @@ out:

     return r;
 }
-
+EXPORT_SYMBOL_GPL(dm_copy_name_and_uuid);

 /**
  * dm_early_create - create a mapped device in early boot.
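
The dm-ioctl.c hunk above exports dm_copy_name_and_uuid() for use by other
kernel code. A hedged sketch of a possible in-kernel caller, assuming a valid
mapped_device reference; log_dm_identity() is a hypothetical helper, not part
of this pull.

#include <linux/device-mapper.h>
#include <linux/dm-ioctl.h>    /* DM_NAME_LEN, DM_UUID_LEN */

static void log_dm_identity(struct mapped_device *md)
{
    char name[DM_NAME_LEN], uuid[DM_UUID_LEN];

    /* returns 0 on success and fills both buffers */
    if (!dm_copy_name_and_uuid(md, name, uuid))
        pr_info("dm device %s (uuid %s)\n", name, uuid);
}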
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index de4da825ade6..bced42f082b0 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -466,10 +466,8 @@ failed:
  */
 #define dm_report_EIO(m)                        \
 do {                                    \
-    struct mapped_device *md = dm_table_get_md((m)->ti->table);    \
-                                    \
     DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
-              dm_device_name(md),                \
+              dm_table_device_name((m)->ti->table),        \
               test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),    \
               test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
               dm_noflush_suspending((m)->ti));            \
@@ -736,7 +734,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 {
     unsigned long flags;
     bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
-    const char *dm_dev_name = dm_device_name(dm_table_get_md(m->ti->table));
+    const char *dm_dev_name = dm_table_device_name(m->ti->table);

     DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
         dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);
@@ -781,9 +779,9 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 static void queue_if_no_path_timeout_work(struct timer_list *t)
 {
     struct multipath *m = from_timer(m, t, nopath_timer);
-    struct mapped_device *md = dm_table_get_md(m->ti->table);

-    DMWARN("queue_if_no_path timeout on %s, failing queued IO", dm_device_name(md));
+    DMWARN("queue_if_no_path timeout on %s, failing queued IO",
+           dm_table_device_name(m->ti->table));
     queue_if_no_path(m, false, false, __func__);
 }

@@ -1334,7 +1332,7 @@ static int fail_path(struct pgpath *pgpath)
         goto out;

     DMWARN("%s: Failing path %s.",
-           dm_device_name(dm_table_get_md(m->ti->table)),
+           dm_table_device_name(m->ti->table),
            pgpath->path.dev->name);

     pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
@@ -1375,7 +1373,7 @@ static int reinstate_path(struct pgpath *pgpath)
         goto out;

     DMWARN("%s: Reinstating path %s.",
-           dm_device_name(dm_table_get_md(m->ti->table)),
+           dm_table_device_name(m->ti->table),
            pgpath->path.dev->name);

     r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
@@ -1766,7 +1764,7 @@ static void multipath_resume(struct dm_target *ti)
     }

     DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
-        dm_device_name(dm_table_get_md(m->ti->table)), __func__,
+        dm_table_device_name(m->ti->table), __func__,
         test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
         test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 56b723d012ac..9c1f7c4de65b 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3728,15 +3728,6 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)

     blk_limits_io_min(limits, chunk_size_bytes);
     blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
-
-    /*
-     * RAID1 and RAID10 personalities require bio splitting,
-     * RAID0/4/5/6 don't and process large discard bios properly.
-     */
-    if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
-        limits->discard_granularity = chunk_size_bytes;
-        limits->max_discard_sectors = rs->md.chunk_sectors;
-    }
 }

 static void raid_postsuspend(struct dm_target *ti)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6d743ff6a314..729a72ec30cc 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -175,7 +175,7 @@ static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long mse

 void dm_mq_kick_requeue_list(struct mapped_device *md)
 {
-    __dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
+    __dm_mq_kick_requeue_list(md->queue, 0);
 }
 EXPORT_SYMBOL(dm_mq_kick_requeue_list);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 63fab7c769be..8e329c3f3a78 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -284,16 +284,9 @@ static void skip_metadata(struct pstore *ps)
  */
 static int area_io(struct pstore *ps, int op, int op_flags)
 {
-    int r;
-    chunk_t chunk;
-
-    chunk = area_location(ps, ps->current_area);
-
-    r = chunk_io(ps, ps->area, chunk, op, op_flags, 0);
-    if (r)
-        return r;
+    chunk_t chunk = area_location(ps, ps->current_area);

-    return 0;
+    return chunk_io(ps, ps->area, chunk, op, op_flags, 0);
 }

 static void zero_memory_area(struct pstore *ps)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index c3be7cb2570c..ce543b761be7 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -18,54 +18,17 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
+#include <linux/lcm.h>
 #include <linux/blk-mq.h>
 #include <linux/mount.h>
 #include <linux/dax.h>

 #define DM_MSG_PREFIX "table"

-#define MAX_DEPTH 16
 #define NODE_SIZE L1_CACHE_BYTES
 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

-struct dm_table {
-    struct mapped_device *md;
-    enum dm_queue_mode type;
-
-    /* btree table */
-    unsigned int depth;
-    unsigned int counts[MAX_DEPTH]; /* in nodes */
-    sector_t *index[MAX_DEPTH];
-
-    unsigned int num_targets;
-    unsigned int num_allocated;
-    sector_t *highs;
-    struct dm_target *targets;
-
-    struct target_type *immutable_target_type;
-
-    bool integrity_supported:1;
-    bool singleton:1;
-    unsigned integrity_added:1;
-
-    /*
-     * Indicates the rw permissions for the new logical
-     * device. This should be a combination of FMODE_READ
-     * and FMODE_WRITE.
-     */
-    fmode_t mode;
-
-    /* a list of devices used by this table */
-    struct list_head devices;
-
-    /* events get handed up using this callback */
-    void (*event_fn)(void *);
-    void *event_context;
-
-    struct dm_md_mempools *mempools;
-};
-
 /*
  * Similar to ceiling(log_size(n))
  */
@@ -841,8 +804,7 @@ EXPORT_SYMBOL(dm_consume_args);
 static bool __table_type_bio_based(enum dm_queue_mode table_type)
 {
     return (table_type == DM_TYPE_BIO_BASED ||
-        table_type == DM_TYPE_DAX_BIO_BASED ||
-        table_type == DM_TYPE_NVME_BIO_BASED);
+        table_type == DM_TYPE_DAX_BIO_BASED);
 }

 static bool __table_type_request_based(enum dm_queue_mode table_type)
@@ -898,8 +860,6 @@ bool dm_table_supports_dax(struct dm_table *t,
     return true;
 }

-static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
-
 static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
                   sector_t start, sector_t len, void *data)
 {
@@ -929,7 +889,6 @@ static int dm_table_determine_type(struct dm_table *t)
             goto verify_bio_based;
         }
         BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
-        BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
         goto verify_rq_based;
     }

@@ -968,15 +927,6 @@ verify_bio_based:
     if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
         (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
         t->type = DM_TYPE_DAX_BIO_BASED;
-    } else {
-        /* Check if upgrading to NVMe bio-based is valid or required */
-        tgt = dm_table_get_immutable_target(t);
-        if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
-            t->type = DM_TYPE_NVME_BIO_BASED;
-            goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
-        } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
-            t->type = DM_TYPE_NVME_BIO_BASED;
-        }
     }
     return 0;
 }
@@ -993,8 +943,7 @@ verify_rq_based:
      * (e.g. request completion process for partial completion.)
      */
     if (t->num_targets > 1) {
-        DMERR("%s DM doesn't support multiple targets",
-              t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
+        DMERR("request-based DM doesn't support multiple targets");
         return -EINVAL;
     }

@@ -1506,6 +1455,10 @@ int dm_calculate_queue_limits(struct dm_table *table,
             zone_sectors = ti_limits.chunk_sectors;
         }

+        /* Stack chunk_sectors if target-specific splitting is required */
+        if (ti->max_io_len)
+            ti_limits.chunk_sectors = lcm_not_zero(ti->max_io_len,
+                                   ti_limits.chunk_sectors);
         /* Set I/O hints portion of queue limits */
         if (ti->type->io_hints)
             ti->type->io_hints(ti, &ti_limits);
@@ -1684,20 +1637,6 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
     return true;
 }

-static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
-                    sector_t start, sector_t len, void *data)
-{
-    char b[BDEVNAME_SIZE];
-
-    /* For now, NVMe devices are the only devices of this class */
-    return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
-}
-
-static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
-{
-    return dm_table_all_devices_attribute(t, device_no_partial_completion);
-}
-
 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
                      sector_t start, sector_t len, void *data)
 {
@@ -2080,16 +2019,11 @@ EXPORT_SYMBOL_GPL(dm_table_device_name);

 void dm_table_run_md_queue_async(struct dm_table *t)
 {
-    struct mapped_device *md;
-    struct request_queue *queue;
-
     if (!dm_table_request_based(t))
         return;

-    md = dm_table_get_md(t);
-    queue = dm_get_md_queue(md);
-    if (queue)
-        blk_mq_run_hw_queues(queue, true);
+    if (t->md->queue)
+        blk_mq_run_hw_queues(t->md->queue, true);
 }
 EXPORT_SYMBOL(dm_table_run_md_queue_async);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index b461836b6d26..6ebb2127f3e2 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1051,12 +1051,11 @@ static int __create_thin(struct dm_pool_metadata *pmd,
     int r;
     dm_block_t dev_root;
     uint64_t key = dev;
-    struct disk_device_details details_le;
     struct dm_thin_device *td;
     __le64 value;

     r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
-                &key, &details_le);
+                &key, NULL);
     if (!r)
         return -EEXIST;

@@ -1129,12 +1128,11 @@ static int __create_snap(struct dm_pool_metadata *pmd,
     dm_block_t origin_root;
     uint64_t key = origin, dev_key = dev;
     struct dm_thin_device *td;
-    struct disk_device_details details_le;
     __le64 value;

     /* check this device is unused */
     r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
-                &dev_key, &details_le);
+                &dev_key, NULL);
     if (!r)
         return -EEXIST;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index cd2b3526c07b..c18fc2548518 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -422,21 +422,6 @@ static void do_deferred_remove(struct work_struct *w)
     dm_deferred_remove();
 }

-sector_t dm_get_size(struct mapped_device *md)
-{
-    return get_capacity(md->disk);
-}
-
-struct request_queue *dm_get_md_queue(struct mapped_device *md)
-{
-    return md->queue;
-}
-
-struct dm_stats *dm_get_stats(struct mapped_device *md)
-{
-    return &md->stats;
-}
-
 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
     struct mapped_device *md = bdev->bd_disk->private_data;
@@ -591,7 +576,44 @@ out:
     return r;
 }

-static void start_io_acct(struct dm_io *io);
+u64 dm_start_time_ns_from_clone(struct bio *bio)
+{
+    struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+    struct dm_io *io = tio->io;
+
+    return jiffies_to_nsecs(io->start_time);
+}
+EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
+
+static void start_io_acct(struct dm_io *io)
+{
+    struct mapped_device *md = io->md;
+    struct bio *bio = io->orig_bio;
+
+    io->start_time = bio_start_io_acct(bio);
+    if (unlikely(dm_stats_used(&md->stats)))
+        dm_stats_account_io(&md->stats, bio_data_dir(bio),
+                    bio->bi_iter.bi_sector, bio_sectors(bio),
+                    false, 0, &io->stats_aux);
+}
+
+static void end_io_acct(struct dm_io *io)
+{
+    struct mapped_device *md = io->md;
+    struct bio *bio = io->orig_bio;
+    unsigned long duration = jiffies - io->start_time;
+
+    bio_end_io_acct(bio, io->start_time);
+
+    if (unlikely(dm_stats_used(&md->stats)))
+        dm_stats_account_io(&md->stats, bio_data_dir(bio),
+                    bio->bi_iter.bi_sector, bio_sectors(bio),
+                    true, duration, &io->stats_aux);
+
+    /* nudge anyone waiting on suspend queue */
+    if (unlikely(wq_has_sleeper(&md->wait)))
+        wake_up(&md->wait);
+}

 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
 {
@@ -657,45 +679,6 @@ static void free_tio(struct dm_target_io *tio)
     bio_put(&tio->clone);
 }

-u64 dm_start_time_ns_from_clone(struct bio *bio)
-{
-    struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
-    struct dm_io *io = tio->io;
-
-    return jiffies_to_nsecs(io->start_time);
-}
-EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
-
-static void start_io_acct(struct dm_io *io)
-{
-    struct mapped_device *md = io->md;
-    struct bio *bio = io->orig_bio;
-
-    io->start_time = bio_start_io_acct(bio);
-    if (unlikely(dm_stats_used(&md->stats)))
-        dm_stats_account_io(&md->stats, bio_data_dir(bio),
-                    bio->bi_iter.bi_sector, bio_sectors(bio),
-                    false, 0, &io->stats_aux);
-}
-
-static void end_io_acct(struct dm_io *io)
-{
-    struct mapped_device *md = io->md;
-    struct bio *bio = io->orig_bio;
-    unsigned long duration = jiffies - io->start_time;
-
-    bio_end_io_acct(bio, io->start_time);
-
-    if (unlikely(dm_stats_used(&md->stats)))
-        dm_stats_account_io(&md->stats, bio_data_dir(bio),
-                    bio->bi_iter.bi_sector, bio_sectors(bio),
-                    true, duration, &io->stats_aux);
-
-    /* nudge anyone waiting on suspend queue */
-    if (unlikely(wq_has_sleeper(&md->wait)))
-        wake_up(&md->wait);
-}
-
 /*
  * Add the bio to the list of deferred io.
  */
@@ -992,7 +975,7 @@ static void clone_endio(struct bio *bio)
     dm_endio_fn endio = tio->ti->type->end_io;
     struct bio *orig_bio = io->orig_bio;

-    if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
+    if (unlikely(error == BLK_STS_TARGET)) {
         if (bio_op(bio) == REQ_OP_DISCARD &&
             !bio->bi_disk->queue->limits.max_discard_sectors)
             disable_discard(md);
@@ -1041,32 +1024,28 @@ static void clone_endio(struct bio *bio)
  * Return maximum size of I/O possible at the supplied sector up to the current
  * target boundary.
  */
-static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
+static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
+                          sector_t target_offset)
 {
-    sector_t target_offset = dm_target_offset(ti, sector);
-
     return ti->len - target_offset;
 }

-static sector_t max_io_len(sector_t sector, struct dm_target *ti)
+static sector_t max_io_len(struct dm_target *ti, sector_t sector)
 {
-    sector_t len = max_io_len_target_boundary(sector, ti);
-    sector_t offset, max_len;
+    sector_t target_offset = dm_target_offset(ti, sector);
+    sector_t len = max_io_len_target_boundary(ti, target_offset);
+    sector_t max_len;

     /*
      * Does the target need to split even further?
+     * - q->limits.chunk_sectors reflects ti->max_io_len so
+     *   blk_max_size_offset() provides required splitting.
+     * - blk_max_size_offset() also respects q->limits.max_sectors
      */
-    if (ti->max_io_len) {
-        offset = dm_target_offset(ti, sector);
-        if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
-            max_len = sector_div(offset, ti->max_io_len);
-        else
-            max_len = offset & (ti->max_io_len - 1);
-        max_len = ti->max_io_len - max_len;
-
-        if (len > max_len)
-            len = max_len;
-    }
+    max_len = blk_max_size_offset(ti->table->md->queue,
+                      target_offset);
+    if (len > max_len)
+        len = max_len;

     return len;
 }
@@ -1119,7 +1098,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
         goto out;
     if (!ti->type->direct_access)
         goto out;
-    len = max_io_len(sector, ti) / PAGE_SECTORS;
+    len = max_io_len(ti, sector) / PAGE_SECTORS;
     if (len < 1)
         goto out;
     nr_pages = min(len, nr_pages);
@@ -1431,6 +1410,17 @@ static int __send_empty_flush(struct clone_info *ci)
 {
     unsigned target_nr = 0;
     struct dm_target *ti;
+    struct bio flush_bio;
+
+    /*
+     * Use an on-stack bio for this, it's safe since we don't
+     * need to reference it after submit. It's just used as
+     * the basis for the clone(s).
+     */
+    bio_init(&flush_bio, NULL, 0);
+    flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+    ci->bio = &flush_bio;
+    ci->sector_count = 0;

     /*
      * Empty flush uses a statically initialized bio, as the base for
      */
     BUG_ON(bio_has_data(ci->bio));
     while ((ti = dm_table_get_target(ci->map, target_nr++)))
         __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
+
+    bio_uninit(ci->bio);
     return 0;
 }

@@ -1466,28 +1458,6 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
     return 0;
 }

-typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
-
-static unsigned get_num_discard_bios(struct dm_target *ti)
-{
-    return ti->num_discard_bios;
-}
-
-static unsigned get_num_secure_erase_bios(struct dm_target *ti)
-{
-    return ti->num_secure_erase_bios;
-}
-
-static unsigned get_num_write_same_bios(struct dm_target *ti)
-{
-    return ti->num_write_same_bios;
-}
-
-static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
-{
-    return ti->num_write_zeroes_bios;
-}
-
 static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
                        unsigned num_bios)
 {
@@ -1502,7 +1472,8 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *
     if (!num_bios)
         return -EOPNOTSUPP;

-    len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+    len = min_t(sector_t, ci->sector_count,
+            max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));

     __send_duplicate_bios(ci, ti, num_bios, &len);

@@ -1512,26 +1483,6 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *
     return 0;
 }

-static int __send_discard(struct clone_info *ci, struct dm_target *ti)
-{
-    return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
-}
-
-static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
-{
-    return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
-}
-
-static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
-{
-    return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
-}
-
-static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
-{
-    return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
-}
-
 static bool is_abnormal_io(struct bio *bio)
 {
     bool r = false;
@@ -1552,18 +1503,26 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
                   int *result)
 {
     struct bio *bio = ci->bio;
+    unsigned num_bios = 0;

-    if (bio_op(bio) == REQ_OP_DISCARD)
-        *result = __send_discard(ci, ti);
-    else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
-        *result = __send_secure_erase(ci, ti);
-    else if (bio_op(bio) == REQ_OP_WRITE_SAME)
-        *result = __send_write_same(ci, ti);
-    else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
-        *result = __send_write_zeroes(ci, ti);
-    else
+    switch (bio_op(bio)) {
+    case REQ_OP_DISCARD:
+        num_bios = ti->num_discard_bios;
+        break;
+    case REQ_OP_SECURE_ERASE:
+        num_bios = ti->num_secure_erase_bios;
+        break;
+    case REQ_OP_WRITE_SAME:
+        num_bios = ti->num_write_same_bios;
+        break;
+    case REQ_OP_WRITE_ZEROES:
+        num_bios = ti->num_write_zeroes_bios;
+        break;
+    default:
         return false;
+    }

+    *result = __send_changing_extent_only(ci, ti, num_bios);
     return true;
 }

@@ -1583,7 +1542,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
     if (__process_abnormal_io(ci, ti, &r))
         return r;

-    len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
+    len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);

     r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
     if (r < 0)
@@ -1619,19 +1578,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
     init_clone_info(&ci, md, map, bio);

     if (bio->bi_opf & REQ_PREFLUSH) {
-        struct bio flush_bio;
-
-        /*
-         * Use an on-stack bio for this, it's safe since we don't
-         * need to reference it after submit. It's just used as
-         * the basis for the clone(s).
-         */
-        bio_init(&flush_bio, NULL, 0);
-        flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
-        ci.bio = &flush_bio;
-        ci.sector_count = 0;
         error = __send_empty_flush(&ci);
-        bio_uninit(ci.bio);
         /* dec_pending submits any data associated with flush */
     } else if (op_is_zone_mgmt(bio_op(bio))) {
         ci.bio = bio;
@@ -1680,88 +1627,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
     return ret;
 }

-/*
- * Optimized variant of __split_and_process_bio that leverages the
- * fact that targets that use it do _not_ have a need to split bios.
- */
-static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
-                  struct bio *bio, struct dm_target *ti)
-{
-    struct clone_info ci;
-    blk_qc_t ret = BLK_QC_T_NONE;
-    int error = 0;
-
-    init_clone_info(&ci, md, map, bio);
-
-    if (bio->bi_opf & REQ_PREFLUSH) {
-        struct bio flush_bio;
-
-        /*
-         * Use an on-stack bio for this, it's safe since we don't
-         * need to reference it after submit. It's just used as
-         * the basis for the clone(s).
-         */
-        bio_init(&flush_bio, NULL, 0);
-        flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
-        ci.bio = &flush_bio;
-        ci.sector_count = 0;
-        error = __send_empty_flush(&ci);
-        bio_uninit(ci.bio);
-        /* dec_pending submits any data associated with flush */
-    } else {
-        struct dm_target_io *tio;
-
-        ci.bio = bio;
-        ci.sector_count = bio_sectors(bio);
-        if (__process_abnormal_io(&ci, ti, &error))
-            goto out;
-
-        tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
-        ret = __clone_and_map_simple_bio(&ci, tio, NULL);
-    }
-out:
-    /* drop the extra reference count */
-    dec_pending(ci.io, errno_to_blk_status(error));
-    return ret;
-}
-
-static blk_qc_t dm_process_bio(struct mapped_device *md,
-                   struct dm_table *map, struct bio *bio)
-{
-    blk_qc_t ret = BLK_QC_T_NONE;
-    struct dm_target *ti = md->immutable_target;
-
-    if (unlikely(!map)) {
-        bio_io_error(bio);
-        return ret;
-    }
-
-    if (!ti) {
-        ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
-        if (unlikely(!ti)) {
-            bio_io_error(bio);
-            return ret;
-        }
-    }
-
-    /*
-     * If in ->submit_bio we need to use blk_queue_split(), otherwise
-     * queue_limits for abnormal requests (e.g. discard, writesame, etc)
-     * won't be imposed.
-     * If called from dm_wq_work() for deferred bio processing, bio
-     * was already handled by following code with previous ->submit_bio.
-     */
-    if (current->bio_list) {
-        if (is_abnormal_io(bio))
-            blk_queue_split(&bio);
-        /* regular IO is split by __split_and_process_bio */
-    }
-
-    if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
-        return __process_bio(md, map, bio, ti);
-    return __split_and_process_bio(md, map, bio);
-}
-
 static blk_qc_t dm_submit_bio(struct bio *bio)
 {
     struct mapped_device *md = bio->bi_disk->private_data;
@@ -1769,35 +1634,34 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
     int srcu_idx;
     struct dm_table *map;

-    if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
-        /*
-         * We are called with a live reference on q_usage_counter, but
-         * that one will be released as soon as we return.  Grab an
-         * extra one as blk_mq_submit_bio expects to be able to consume
-         * a reference (which lives until the request is freed in case a
-         * request is allocated).
-         */
-        percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
-        return blk_mq_submit_bio(bio);
-    }
-
     map = dm_get_live_table(md, &srcu_idx);
+    if (unlikely(!map)) {
+        DMERR_LIMIT("%s: mapping table unavailable, erroring io",
+                dm_device_name(md));
+        bio_io_error(bio);
+        goto out;
+    }

-    /* if we're suspended, we have to queue this io for later */
+    /* If suspended, queue this IO for later */
     if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
-        dm_put_live_table(md, srcu_idx);
-
         if (bio->bi_opf & REQ_NOWAIT)
             bio_wouldblock_error(bio);
-        else if (!(bio->bi_opf & REQ_RAHEAD))
-            queue_io(md, bio);
-        else
+        else if (bio->bi_opf & REQ_RAHEAD)
             bio_io_error(bio);
-        return ret;
+        else
+            queue_io(md, bio);
+        goto out;
     }

-    ret = dm_process_bio(md, map, bio);
+    /*
+     * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
+     * otherwise associated queue_limits won't be imposed.
+     */
+    if (is_abnormal_io(bio))
+        blk_queue_split(&bio);

+    ret = __split_and_process_bio(md, map, bio);
+out:
     dm_put_live_table(md, srcu_idx);
     return ret;
 }
@@ -1852,6 +1716,7 @@ static int next_free_minor(int *minor)
 }

 static const struct block_device_operations dm_blk_dops;
+static const struct block_device_operations dm_rq_blk_dops;
 static const struct dax_operations dm_dax_ops;

 static void dm_wq_work(struct work_struct *work);
@@ -2121,12 +1986,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
     if (request_based)
         dm_stop_queue(q);

-    if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
+    if (request_based) {
         /*
-         * Leverage the fact that request-based DM targets and
-         * NVMe bio based targets are immutable singletons
-         * - used to optimize both dm_request_fn and dm_mq_queue_rq;
-         *   and __process_bio.
+         * Leverage the fact that request-based DM targets are
+         * immutable singletons - used to optimize dm_mq_queue_rq.
          */
         md->immutable_target = dm_table_get_immutable_target(t);
     }
@@ -2240,15 +2103,15 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)

     switch (type) {
     case DM_TYPE_REQUEST_BASED:
+        md->disk->fops = &dm_rq_blk_dops;
         r = dm_mq_init_request_queue(md, t);
         if (r) {
-            DMERR("Cannot initialize queue for request-based dm-mq mapped device");
+            DMERR("Cannot initialize queue for request-based dm mapped device");
             return r;
         }
         break;
     case DM_TYPE_BIO_BASED:
     case DM_TYPE_DAX_BIO_BASED:
-    case DM_TYPE_NVME_BIO_BASED:
         break;
     case DM_TYPE_NONE:
         WARN_ON_ONCE(true);
@@ -2453,29 +2316,19 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state)
  */
 static void dm_wq_work(struct work_struct *work)
 {
-    struct mapped_device *md = container_of(work, struct mapped_device,
-                        work);
-    struct bio *c;
-    int srcu_idx;
-    struct dm_table *map;
-
-    map = dm_get_live_table(md, &srcu_idx);
+    struct mapped_device *md = container_of(work, struct mapped_device, work);
+    struct bio *bio;

     while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
         spin_lock_irq(&md->deferred_lock);
-        c = bio_list_pop(&md->deferred);
+        bio = bio_list_pop(&md->deferred);
         spin_unlock_irq(&md->deferred_lock);

-        if (!c)
+        if (!bio)
             break;

-        if (dm_request_based(md))
-            (void) submit_bio_noacct(c);
-        else
-            (void) dm_process_bio(md, map, c);
+        submit_bio_noacct(bio);
     }
-
-    dm_put_live_table(md, srcu_idx);
 }

 static void dm_queue_flush(struct mapped_device *md)
@@ -2612,13 +2465,12 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
     /*
      * Here we must make sure that no processes are submitting requests
      * to target drivers i.e. no one may be executing
-     * __split_and_process_bio. This is called from dm_request and
-     * dm_wq_work.
+     * __split_and_process_bio from dm_submit_bio.
      *
-     * To get all processes out of __split_and_process_bio in dm_request,
+     * To get all processes out of __split_and_process_bio in dm_submit_bio,
      * we take the write lock. To prevent any process from reentering
-     * __split_and_process_bio from dm_request and quiesce the thread
-     * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call
+     * __split_and_process_bio from dm_submit_bio and quiesce the thread
+     * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
      * flush_workqueue(md->wq).
      */
     set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
@@ -2986,19 +2838,19 @@ int dm_test_deferred_remove_flag(struct mapped_device *md)

 int dm_suspended(struct dm_target *ti)
 {
-    return dm_suspended_md(dm_table_get_md(ti->table));
+    return dm_suspended_md(ti->table->md);
 }
 EXPORT_SYMBOL_GPL(dm_suspended);

 int dm_post_suspending(struct dm_target *ti)
 {
-    return dm_post_suspending_md(dm_table_get_md(ti->table));
+    return dm_post_suspending_md(ti->table->md);
 }
 EXPORT_SYMBOL_GPL(dm_post_suspending);

 int dm_noflush_suspending(struct dm_target *ti)
 {
-    return __noflush_suspending(dm_table_get_md(ti->table));
+    return __noflush_suspending(ti->table->md);
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);

@@ -3017,7 +2869,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
     switch (type) {
     case DM_TYPE_BIO_BASED:
     case DM_TYPE_DAX_BIO_BASED:
-    case DM_TYPE_NVME_BIO_BASED:
         pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
         front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
         io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
@@ -3235,6 +3086,15 @@ static const struct block_device_operations dm_blk_dops = {
     .owner = THIS_MODULE
 };

+static const struct block_device_operations dm_rq_blk_dops = {
+    .open = dm_blk_open,
+    .release = dm_blk_close,
+    .ioctl = dm_blk_ioctl,
+    .getgeo = dm_blk_getgeo,
+    .pr_ops = &dm_pr_ops,
+    .owner = THIS_MODULE
+};
+
 static const struct dax_operations dm_dax_ops = {
     .direct_access = dm_dax_direct_access,
     .dax_supported = dm_dax_supported,
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 4f5fe664d05a..fffe1e289c53 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -179,12 +179,9 @@ int dm_open_count(struct mapped_device *md);
 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
 int dm_cancel_deferred_remove(struct mapped_device *md);
 int dm_request_based(struct mapped_device *md);
-sector_t dm_get_size(struct mapped_device *md);
-struct request_queue *dm_get_md_queue(struct mapped_device *md);
 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
             struct dm_dev **result);
 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
-struct dm_stats *dm_get_stats(struct mapped_device *md);

 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
               unsigned cookie);
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 8aae0624a297..ef6e78d45d5b 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -366,7 +366,8 @@ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
     } while (!(flags & LEAF_NODE));

     *result_key = le64_to_cpu(ro_node(s)->keys[i]);
-    memcpy(v, value_ptr(ro_node(s), i), value_size);
+    if (v)
+        memcpy(v, value_ptr(ro_node(s), i), value_size);

     return 0;
 }
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index d6f8d4ba8d48..61a66fb8ebb3 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -29,7 +29,6 @@ enum dm_queue_mode {
     DM_TYPE_BIO_BASED = 1,
     DM_TYPE_REQUEST_BASED = 2,
     DM_TYPE_DAX_BIO_BASED = 3,
-    DM_TYPE_NVME_BIO_BASED = 4,
 };

 typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 6622912c2342..4933b6b67b85 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -272,9 +272,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY    _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)

 #define DM_VERSION_MAJOR    4
-#define DM_VERSION_MINOR    42
+#define DM_VERSION_MINOR    43
 #define DM_VERSION_PATCHLEVEL    0
-#define DM_VERSION_EXTRA    "-ioctl (2020-02-27)"
+#define DM_VERSION_EXTRA    "-ioctl (2020-10-01)"

 /* Status bits */
 #define DM_READONLY_FLAG    (1 << 0) /* In/Out */
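
The "stack chunk_sectors" hunk in dm-table.c above combines a target's
max_io_len with any existing chunk_sectors via the least common multiple, so a
single boundary satisfies both constraints. A standalone userspace sketch of
that combining rule, mirroring lcm_not_zero() semantics (either argument being
zero yields the other); the values are illustrative.

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
    while (b) {
        unsigned long t = a % b;
        a = b;
        b = t;
    }
    return a;
}

/* lcm_not_zero() semantics: 0 means "no constraint", so the other wins */
static unsigned long lcm_not_zero(unsigned long a, unsigned long b)
{
    if (!a)
        return b;
    if (!b)
        return a;
    return (a / gcd(a, b)) * b;
}

int main(void)
{
    /* target max_io_len = 24 sectors on a device with chunk_sectors = 16:
     * the stacked boundary must be a multiple of both -> 48
     */
    printf("%lu\n", lcm_not_zero(24, 16));  /* 48 */
    printf("%lu\n", lcm_not_zero(0, 16));   /* 16: target sets no max_io_len */
    return 0;
}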