author     Mikulas Patocka <mpatocka@redhat.com>   2021-03-01 04:58:43 -0500
committer  Mike Snitzer <snitzer@redhat.com>       2021-03-26 14:53:41 -0400
commit     8615cb65bd638ba5f9ebe71115cc5956eb1713d0 (patch)
tree       93d0b0910d51db6dfc049ab7a1ce9700a3083d08 /drivers/md/dm.c
parent     c40819f267f76e69418d3bc9fbb57962a6845673 (diff)
dm: remove useless loop in __split_and_process_bio
Remove useless "while" loop. If the condition ci.sector_count && !error is true, we go to a branch that ends with "break". If this condition is false, the "while" loop will not be executed again. So, the loop can't be executed more than once.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
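To make the commit message's control-flow argument concrete, the following is a minimal stand-alone sketch in C, not the DM code itself: process_one() and the struct ctx fields are hypothetical stand-ins for __split_and_process_non_flush() and the relevant clone_info fields. Whenever the loop condition would still hold after the body runs, the body breaks out; otherwise the condition is false and the loop exits anyway, so the body can execute at most once and the "while" collapses to straight-line code.

#include <stdio.h>

struct ctx {
	unsigned int sector_count;
	int error;
};

/* Hypothetical stand-in for __split_and_process_non_flush(). */
static int process_one(struct ctx *ci)
{
	/* Pretend part of the I/O was consumed and a remainder is left. */
	ci->sector_count -= 4;
	return 0;
}

int main(void)
{
	struct ctx ci = { .sector_count = 8, .error = 0 };
	int passes = 0;

	while (ci.sector_count && !ci.error) {
		passes++;
		ci.error = process_one(&ci);
		if (ci.sector_count && !ci.error) {
			/* Remainder path: hand off the rest and stop. */
			break;
		}
		/*
		 * No remainder (or an error): the loop condition is now
		 * false, so the while loop terminates here as well.
		 */
	}

	printf("loop body executed %d time(s)\n", passes);	/* always 1 */
	return 0;
}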
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--  drivers/md/dm.c | 61
1 file changed, 29 insertions(+), 32 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3f3be9408afa..1dac75cb55ab 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1641,38 +1641,35 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
} else {
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
- while (ci.sector_count && !error) {
- error = __split_and_process_non_flush(&ci);
- if (ci.sector_count && !error) {
- /*
- * Remainder must be passed to submit_bio_noacct()
- * so that it gets handled *after* bios already submitted
- * have been completely processed.
- * We take a clone of the original to store in
- * ci.io->orig_bio to be used by end_io_acct() and
- * for dec_pending to use for completion handling.
- */
- struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
- GFP_NOIO, &md->queue->bio_split);
- ci.io->orig_bio = b;
-
- /*
- * Adjust IO stats for each split, otherwise upon queue
- * reentry there will be redundant IO accounting.
- * NOTE: this is a stop-gap fix, a proper fix involves
- * significant refactoring of DM core's bio splitting
- * (by eliminating DM's splitting and just using bio_split)
- */
- part_stat_lock();
- __dm_part_stat_sub(dm_disk(md)->part0,
- sectors[op_stat_group(bio_op(bio))], ci.sector_count);
- part_stat_unlock();
-
- bio_chain(b, bio);
- trace_block_split(b, bio->bi_iter.bi_sector);
- ret = submit_bio_noacct(bio);
- break;
- }
+ error = __split_and_process_non_flush(&ci);
+ if (ci.sector_count && !error) {
+ /*
+ * Remainder must be passed to submit_bio_noacct()
+ * so that it gets handled *after* bios already submitted
+ * have been completely processed.
+ * We take a clone of the original to store in
+ * ci.io->orig_bio to be used by end_io_acct() and
+ * for dec_pending to use for completion handling.
+ */
+ struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
+ GFP_NOIO, &md->queue->bio_split);
+ ci.io->orig_bio = b;
+
+ /*
+ * Adjust IO stats for each split, otherwise upon queue
+ * reentry there will be redundant IO accounting.
+ * NOTE: this is a stop-gap fix, a proper fix involves
+ * significant refactoring of DM core's bio splitting
+ * (by eliminating DM's splitting and just using bio_split)
+ */
+ part_stat_lock();
+ __dm_part_stat_sub(dm_disk(md)->part0,
+ sectors[op_stat_group(bio_op(bio))], ci.sector_count);
+ part_stat_unlock();
+
+ bio_chain(b, bio);
+ trace_block_split(b, bio->bi_iter.bi_sector);
+ ret = submit_bio_noacct(bio);
}
}