Diffstat (limited to 'drivers/md')

 -rw-r--r--  drivers/md/bcache/super.c        |  27
 -rw-r--r--  drivers/md/dm-bufio.c            |  26
 -rw-r--r--  drivers/md/dm-ebs-target.c       |  15
 -rw-r--r--  drivers/md/dm-flakey.c           |   8
 -rw-r--r--  drivers/md/dm-integrity.c        |  76
 -rw-r--r--  drivers/md/dm-io.c               |  38
 -rw-r--r--  drivers/md/dm-kcopyd.c           |  26
 -rw-r--r--  drivers/md/dm-log.c              |   8
 -rw-r--r--  drivers/md/dm-raid.c             |   2
 -rw-r--r--  drivers/md/dm-raid1.c            |  12
 -rw-r--r--  drivers/md/dm-snap-persistent.c  |  25
 -rw-r--r--  drivers/md/dm-table.c            |   6
 -rw-r--r--  drivers/md/dm-writecache.c       |  12
 -rw-r--r--  drivers/md/dm-zone.c             |  88
 -rw-r--r--  drivers/md/dm-zoned-metadata.c   |   5
 -rw-r--r--  drivers/md/dm-zoned-target.c     |  25
 -rw-r--r--  drivers/md/dm-zoned.h            |   2
 -rw-r--r--  drivers/md/dm.c                  |  33
 -rw-r--r--  drivers/md/md-bitmap.c           |   6
 -rw-r--r--  drivers/md/md.c                  |  16
 -rw-r--r--  drivers/md/md.h                  |   3
 -rw-r--r--  drivers/md/raid1.c               |  14
 -rw-r--r--  drivers/md/raid10.c              |  22
 -rw-r--r--  drivers/md/raid5-cache.c         |  12
 -rw-r--r--  drivers/md/raid5-ppl.c           |  12
 -rw-r--r--  drivers/md/raid5.c               |   3

 26 files changed, 254 insertions(+), 268 deletions(-)
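The theme running through every hunk below is the replacement of the old pair of arguments (an operation int plus a separate flags word) with the single blk_opf_t type, from which the operation half is recovered with REQ_OP_MASK. A minimal kernel-style sketch of the pattern; the helper name and the bool parameter are illustrative, not from the patch:

    #include <linux/bio.h>
    #include <linux/blk_types.h>

    /* Compose op and flags in one blk_opf_t, then split the op back out. */
    static void submit_meta_write(struct bio *bio, bool fua)
    {
            blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;

            if (fua)
                    opf |= REQ_FUA;
            bio->bi_opf = opf;
            /* The operation occupies the low bits; REQ_OP_MASK recovers it: */
            WARN_ON_ONCE((opf & REQ_OP_MASK) != REQ_OP_WRITE);
            submit_bio(bio);
    }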
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3563d15dbaf2..ba3909bb6bea 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -414,8 +414,8 @@ static void uuid_io_unlock(struct closure *cl)
         up(&c->uuid_write_mutex);
 }
-static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
-                    struct bkey *k, struct closure *parent)
+static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k,
+                    struct closure *parent)
 {
         struct closure *cl = &c->uuid_write;
         struct uuid_entry *u;
@@ -429,22 +429,22 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
         for (i = 0; i < KEY_PTRS(k); i++) {
                 struct bio *bio = bch_bbio_alloc(c);
-                bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
+                bio->bi_opf = opf | REQ_SYNC | REQ_META;
                 bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
                 bio->bi_end_io = uuid_endio;
                 bio->bi_private = cl;
-                bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
                 bch_bio_map(bio, c->uuids);
                 bch_submit_bbio(bio, c, k, i);
-                if (op != REQ_OP_WRITE)
+                if ((opf & REQ_OP_MASK) != REQ_OP_WRITE)
                         break;
         }
         bch_extent_to_text(buf, sizeof(buf), k);
-        pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf);
+        pr_debug("%s UUIDs at %s\n", (opf & REQ_OP_MASK) == REQ_OP_WRITE ?
+                 "wrote" : "read", buf);
         for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
                 if (!bch_is_zero(u->uuid, 16))
@@ -463,7 +463,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
                 return "bad uuid pointer";
         bkey_copy(&c->uuid_bucket, k);
-        uuid_io(c, REQ_OP_READ, 0, k, cl);
+        uuid_io(c, REQ_OP_READ, k, cl);
         if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
                 struct uuid_entry_v0 *u0 = (void *) c->uuids;
@@ -511,7 +511,7 @@ static int __uuid_write(struct cache_set *c)
         size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
         SET_KEY_SIZE(&k.key, size);
-        uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
+        uuid_io(c, REQ_OP_WRITE, &k.key, &cl);
         closure_sync(&cl);
         /* Only one bucket used for uuid write */
@@ -587,8 +587,7 @@ static void prio_endio(struct bio *bio)
         closure_put(&ca->prio);
 }
-static void prio_io(struct cache *ca, uint64_t bucket, int op,
-                    unsigned long op_flags)
+static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf)
 {
         struct closure *cl = &ca->prio;
         struct bio *bio = bch_bbio_alloc(ca->set);
@@ -601,7 +600,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
         bio->bi_end_io = prio_endio;
         bio->bi_private = ca;
-        bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
+        bio->bi_opf = opf | REQ_SYNC | REQ_META;
         bch_bio_map(bio, ca->disk_buckets);
         closure_bio_submit(ca->set, bio, &ca->prio);
@@ -661,7 +660,7 @@ int bch_prio_write(struct cache *ca, bool wait)
                 BUG_ON(bucket == -1);
                 mutex_unlock(&ca->set->bucket_lock);
-                prio_io(ca, bucket, REQ_OP_WRITE, 0);
+                prio_io(ca, bucket, REQ_OP_WRITE);
                 mutex_lock(&ca->set->bucket_lock);
                 ca->prio_buckets[i] = bucket;
@@ -705,7 +704,7 @@ static int prio_read(struct cache *ca, uint64_t bucket)
                         ca->prio_last_buckets[bucket_nr] = bucket;
                         bucket_nr++;
-                        prio_io(ca, bucket, REQ_OP_READ, 0);
+                        prio_io(ca, bucket, REQ_OP_READ);
                         if (p->csum !=
                             bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) {
@@ -884,7 +883,7 @@ static void bcache_device_free(struct bcache_device *d)
         if (disk) {
                 ida_simple_remove(&bcache_device_idx,
                                   first_minor_to_idx(disk->first_minor));
-                blk_cleanup_disk(disk);
+                put_disk(disk);
         }
         bioset_exit(&d->bio_split);
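Two details in the bcache hunks are worth calling out: the bio_set_op_attrs() call, which used to clobber the flags assigned just above it, is dropped in favour of one combined assignment, and blk_cleanup_disk() becomes put_disk() now that the queue is torn down with the disk. A tiny sketch of the resulting prep pattern; the helper name is hypothetical:

    #include <linux/bio.h>

    static void bch_prep_meta_bio(struct bio *bio, blk_opf_t opf)
    {
            /* One assignment carries both the op and the flags. */
            bio->bi_opf = opf | REQ_SYNC | REQ_META;
    }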
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5ffa1dcf84cf..dc01ce33265b 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -577,13 +577,12 @@ static void dmio_complete(unsigned long error, void *context)
         b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
 }
-static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
+static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
                      unsigned n_sectors, unsigned offset)
 {
         int r;
         struct dm_io_request io_req = {
-                .bi_op = rw,
-                .bi_op_flags = 0,
+                .bi_opf = op,
                 .notify.fn = dmio_complete,
                 .notify.context = b,
                 .client = b->c->dm_io,
@@ -616,7 +615,7 @@ static void bio_complete(struct bio *bio)
         b->end_io(b, status);
 }
-static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
+static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
                     unsigned n_sectors, unsigned offset)
 {
         struct bio *bio;
@@ -630,10 +629,10 @@ static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
         bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
         if (!bio) {
 dmio:
-                use_dmio(b, rw, sector, n_sectors, offset);
+                use_dmio(b, op, sector, n_sectors, offset);
                 return;
         }
-        bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, rw);
+        bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
         bio->bi_iter.bi_sector = sector;
         bio->bi_end_io = bio_complete;
         bio->bi_private = b;
@@ -669,7 +668,8 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
         return sector;
 }
-static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
+static void submit_io(struct dm_buffer *b, enum req_op op,
+                      void (*end_io)(struct dm_buffer *, blk_status_t))
 {
         unsigned n_sectors;
         sector_t sector;
@@ -679,7 +679,7 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff
         sector = block_to_sector(b->c, b->block);
-        if (rw != REQ_OP_WRITE) {
+        if (op != REQ_OP_WRITE) {
                 n_sectors = b->c->block_size >> SECTOR_SHIFT;
                 offset = 0;
         } else {
@@ -698,9 +698,9 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff
         }
         if (b->data_mode != DATA_MODE_VMALLOC)
-                use_bio(b, rw, sector, n_sectors, offset);
+                use_bio(b, op, sector, n_sectors, offset);
         else
-                use_dmio(b, rw, sector, n_sectors, offset);
+                use_dmio(b, op, sector, n_sectors, offset);
 }
 /*----------------------------------------------------------------
@@ -1341,8 +1341,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
 int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
         struct dm_io_request io_req = {
-                .bi_op = REQ_OP_WRITE,
-                .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+                .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
                 .mem.type = DM_IO_KMEM,
                 .mem.ptr.addr = NULL,
                 .client = c->dm_io,
@@ -1365,8 +1364,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
 {
         struct dm_io_request io_req = {
-                .bi_op = REQ_OP_DISCARD,
-                .bi_op_flags = REQ_SYNC,
+                .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
                 .mem.type = DM_IO_KMEM,
                 .mem.ptr.addr = NULL,
                 .client = c->dm_io,
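dm-bufio shows the struct dm_io_request side of the conversion: the .bi_op/.bi_op_flags pair collapses into a single .bi_opf member. A hedged sketch of issuing an empty flush through dm_io() with the new field; it assumes an already-created dm_io_client and is not code from the patch:

    #include <linux/device-mapper.h>
    #include <linux/dm-io.h>

    static int issue_flush(struct dm_io_client *client, struct block_device *bdev)
    {
            struct dm_io_request io_req = {
                    .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
                    .mem.type = DM_IO_KMEM,
                    .mem.ptr.addr = NULL,
                    .client = client,
            };
            struct dm_io_region io_reg = {
                    .bdev = bdev,
                    .sector = 0,
                    .count = 0,     /* zero-length region: flush only */
            };

            return dm_io(&io_req, 1, &io_reg, NULL);
    }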
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 0221fa63f888..223e8e1a7a13 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -61,7 +61,8 @@ static inline bool __ebs_check_bs(unsigned int bs)
  *
  * copy blocks between bufio blocks and bio vector's (partial/overlapping) pages.
  */
-static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bvec_iter *iter)
+static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv,
+                         struct bvec_iter *iter)
 {
         int r = 0;
         unsigned char *ba, *pa;
@@ -81,7 +82,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
                 cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len);
                 /* Avoid reading for writes in case bio vector's page overwrites block completely. */
-                if (rw == READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
+                if (op == REQ_OP_READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
                         ba = dm_bufio_read(ec->bufio, block, &b);
                 else
                         ba = dm_bufio_new(ec->bufio, block, &b);
@@ -95,7 +96,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
                 } else {
                         /* Copy data to/from bio to buffer if read/new was successful above. */
                         ba += buf_off;
-                        if (rw == READ) {
+                        if (op == REQ_OP_READ) {
                                 memcpy(pa, ba, cur_len);
                                 flush_dcache_page(bv->bv_page);
                         } else {
@@ -117,14 +118,14 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
 }
 /* READ/WRITE: iterate bio vector's copying between (partial) pages and bufio blocks. */
-static int __ebs_rw_bio(struct ebs_c *ec, int rw, struct bio *bio)
+static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio)
 {
         int r = 0, rr;
         struct bio_vec bv;
         struct bvec_iter iter;
         bio_for_each_bvec(bv, bio, iter) {
-                rr = __ebs_rw_bvec(ec, rw, &bv, &iter);
+                rr = __ebs_rw_bvec(ec, op, &bv, &iter);
                 if (rr)
                         r = rr;
         }
@@ -205,10 +206,10 @@ static void __ebs_process_bios(struct work_struct *ws)
         bio_list_for_each(bio, &bios) {
                 r = -EIO;
                 if (bio_op(bio) == REQ_OP_READ)
-                        r = __ebs_rw_bio(ec, READ, bio);
+                        r = __ebs_rw_bio(ec, REQ_OP_READ, bio);
                 else if (bio_op(bio) == REQ_OP_WRITE) {
                         write = true;
-                        r = __ebs_rw_bio(ec, WRITE, bio);
+                        r = __ebs_rw_bio(ec, REQ_OP_WRITE, bio);
                 } else if (bio_op(bio) == REQ_OP_DISCARD) {
                         __ebs_forget_bio(ec, bio);
                         r = __ebs_discard_bio(ec, bio);
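dm-ebs drops the legacy READ/WRITE macros: bio_op() already yields an enum req_op, so comparing against REQ_OP_* keeps one type end to end. A minimal sketch of the typed check, with a hypothetical helper name:

    #include <linux/bio.h>

    static bool ebs_like_is_read(struct bio *bio)
    {
            const enum req_op op = bio_op(bio);     /* typed, not a bare int */

            return op == REQ_OP_READ;
    }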
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index f2305eb758a2..89fa7a68c6c4 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -32,7 +32,7 @@ struct flakey_c {
         unsigned corrupt_bio_byte;
         unsigned corrupt_bio_rw;
         unsigned corrupt_bio_value;
-        unsigned corrupt_bio_flags;
+        blk_opf_t corrupt_bio_flags;
 };
 enum feature_flag_bits {
@@ -145,7 +145,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
                         /*
                          * Only corrupt bios with these flags set.
                          */
-                        r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
+                        BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
+                                     sizeof(unsigned int));
+                        r = dm_read_arg(_args + 3, as,
+                                        (__force unsigned *)&fc->corrupt_bio_flags,
+                                        &ti->error);
                         if (r)
                                 return r;
                         argc--;
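blk_opf_t is a sparse __bitwise type, so dm-flakey can no longer hand its address straight to dm_read_arg(), which writes an unsigned int; the BUILD_BUG_ON pins the sizes and the (__force) cast does the type laundering deliberately and visibly. A standalone sketch of the same idiom; the helper name is hypothetical:

    #include <linux/blk_types.h>
    #include <linux/build_bug.h>
    #include <linux/kstrtox.h>

    static int parse_opf(const char *str, blk_opf_t *out)
    {
            unsigned int raw;

            BUILD_BUG_ON(sizeof(*out) != sizeof(unsigned int));
            if (kstrtouint(str, 0, &raw))
                    return -EINVAL;
            *out = (__force blk_opf_t)raw;  /* sparse-visible, intentional */
            return 0;
    }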
+ "reading journal" : "writing journal", r); if (comp) { WARN_ONCE(1, "asynchronous dm_io failed: %d", r); complete_journal_io(-1UL, comp); @@ -1096,15 +1097,16 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags, } } -static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section, - unsigned n_sections, struct journal_completion *comp) +static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf, + unsigned section, unsigned n_sections, + struct journal_completion *comp) { unsigned sector, n_sectors; sector = section * ic->journal_section_sectors; n_sectors = n_sections * ic->journal_section_sectors; - rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp); + rw_journal_sectors(ic, opf, sector, n_sectors, comp); } static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections) @@ -1129,7 +1131,7 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi for (i = 0; i < commit_sections; i++) rw_section_mac(ic, commit_start + i, true); } - rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start, + rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start, commit_sections, &io_comp); } else { unsigned to_end; @@ -1141,7 +1143,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); if (try_wait_for_completion(&crypt_comp_1.comp)) { - rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); + rw_journal(ic, REQ_OP_WRITE | REQ_FUA, + commit_start, to_end, &io_comp); reinit_completion(&crypt_comp_1.comp); crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); @@ -1152,17 +1155,17 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); wait_for_completion_io(&crypt_comp_1.comp); - rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); + rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp); wait_for_completion_io(&crypt_comp_2.comp); } } else { for (i = 0; i < to_end; i++) rw_section_mac(ic, commit_start + i, true); - rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); + rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp); for (i = 0; i < commit_sections - to_end; i++) rw_section_mac(ic, i, true); } - rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp); + rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp); } wait_for_completion_io(&io_comp.comp); @@ -1188,8 +1191,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); - io_req.bi_op = REQ_OP_WRITE; - io_req.bi_op_flags = 0; + io_req.bi_opf = REQ_OP_WRITE; io_req.mem.type = DM_IO_PAGE_LIST; io_req.mem.ptr.pl = &ic->journal[pl_index]; io_req.mem.offset = pl_offset; @@ -1516,8 +1518,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat if (!ic->meta_dev) flush_data = false; if (flush_data) { - fr.io_req.bi_op = REQ_OP_WRITE, - fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC, + fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC, fr.io_req.mem.type = 
DM_IO_KMEM, fr.io_req.mem.ptr.addr = NULL, fr.io_req.notify.fn = flush_notify, @@ -2626,7 +2627,7 @@ static void recalc_write_super(struct dm_integrity_c *ic) if (dm_integrity_failed(ic)) return; - r = sync_rw_sb(ic, REQ_OP_WRITE, 0); + r = sync_rw_sb(ic, REQ_OP_WRITE); if (unlikely(r)) dm_integrity_io_error(ic, "writing superblock", r); } @@ -2706,8 +2707,7 @@ next_chunk: if (unlikely(dm_integrity_failed(ic))) goto err; - io_req.bi_op = REQ_OP_READ; - io_req.bi_op_flags = 0; + io_req.bi_opf = REQ_OP_READ; io_req.mem.type = DM_IO_VMA; io_req.mem.ptr.addr = ic->recalc_buffer; io_req.notify.fn = NULL; @@ -2800,7 +2800,7 @@ static void bitmap_block_work(struct work_struct *w) if (bio_list_empty(&waiting)) return; - rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, + rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL); @@ -2846,7 +2846,7 @@ static void bitmap_flush_work(struct work_struct *work) block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR); block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR); - rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, + rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); spin_lock_irq(&ic->endio_wait.lock); @@ -2918,7 +2918,7 @@ static void replay_journal(struct dm_integrity_c *ic) if (!ic->just_formatted) { DEBUG_print("reading journal\n"); - rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL); + rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL); if (ic->journal_io) DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal"); if (ic->journal_io) { @@ -3113,7 +3113,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti) /* set to 0 to test bitmap replay code */ init_journal(ic, 0, ic->journal_sections, 0); ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); - r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); + r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); if (unlikely(r)) dm_integrity_io_error(ic, "writing superblock", r); #endif @@ -3136,23 +3136,23 @@ static void dm_integrity_resume(struct dm_target *ti) if (ic->provided_data_sectors > old_provided_data_sectors && ic->mode == 'B' && ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) { - rw_journal_sectors(ic, REQ_OP_READ, 0, 0, + rw_journal_sectors(ic, REQ_OP_READ, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); block_bitmap_op(ic, ic->journal, old_provided_data_sectors, ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET); - rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, + rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); } ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); - r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); + r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); if (unlikely(r)) dm_integrity_io_error(ic, "writing superblock", r); } if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) { DEBUG_print("resume dirty_bitmap\n"); - rw_journal_sectors(ic, REQ_OP_READ, 0, 0, + rw_journal_sectors(ic, REQ_OP_READ, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); if (ic->mode == 'B') { if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit && @@ -3171,7 +3171,7 @@ static void dm_integrity_resume(struct dm_target *ti) block_bitmap_op(ic, ic->recalc_bitmap, 0, 
ic->provided_data_sectors, BITMAP_OP_SET); block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET); - rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, + rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); ic->sb->recalc_sector = cpu_to_le64(0); @@ -3187,7 +3187,7 @@ static void dm_integrity_resume(struct dm_target *ti) replay_journal(ic); ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); } - r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); + r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); if (unlikely(r)) dm_integrity_io_error(ic, "writing superblock", r); } else { @@ -3199,7 +3199,7 @@ static void dm_integrity_resume(struct dm_target *ti) if (ic->mode == 'B') { ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP); ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; - r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); + r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); if (unlikely(r)) dm_integrity_io_error(ic, "writing superblock", r); @@ -3215,7 +3215,7 @@ static void dm_integrity_resume(struct dm_target *ti) block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector), ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); } - rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, + rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); } } @@ -4256,7 +4256,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } - r = sync_rw_sb(ic, REQ_OP_READ, 0); + r = sync_rw_sb(ic, REQ_OP_READ); if (r) { ti->error = "Error reading superblock"; goto bad; @@ -4500,7 +4500,7 @@ try_smaller_buffer: ti->error = "Error initializing journal"; goto bad; } - r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); + r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); if (r) { ti->error = "Error initializing superblock"; goto bad; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index e4b95eaeec8c..783564533459 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -293,7 +293,7 @@ static void km_dp_init(struct dpages *dp, void *data) /*----------------------------------------------------------------- * IO routines that accept a list of pages. *---------------------------------------------------------------*/ -static void do_region(int op, int op_flags, unsigned region, +static void do_region(const blk_opf_t opf, unsigned region, struct dm_io_region *where, struct dpages *dp, struct io *io) { @@ -306,6 +306,7 @@ static void do_region(int op, int op_flags, unsigned region, struct request_queue *q = bdev_get_queue(where->bdev); sector_t num_sectors; unsigned int special_cmd_max_sectors; + const enum req_op op = opf & REQ_OP_MASK; /* * Reject unsupported discard and write same requests. 
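With sync_rw_sb() taking one blk_opf_t, dm-integrity callers now express intent in a single argument, sync_rw_sb(ic, REQ_OP_READ) versus sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA), and the function splits the op back out once at the top. A sketch of that split, body elided, names hypothetical:

    #include <linux/blk_types.h>
    #include <linux/errno.h>

    static int sync_rw_sb_like(blk_opf_t opf)
    {
            const enum req_op op = opf & REQ_OP_MASK;   /* derived once */

            /* ... build the dm_io_request from opf, use op in messages ... */
            return (op == REQ_OP_READ || op == REQ_OP_WRITE) ? 0 : -EOPNOTSUPP;
    }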
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index e4b95eaeec8c..783564533459 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -293,7 +293,7 @@ static void km_dp_init(struct dpages *dp, void *data)
 /*-----------------------------------------------------------------
  * IO routines that accept a list of pages.
  *---------------------------------------------------------------*/
-static void do_region(int op, int op_flags, unsigned region,
+static void do_region(const blk_opf_t opf, unsigned region,
                       struct dm_io_region *where, struct dpages *dp,
                       struct io *io)
 {
@@ -306,6 +306,7 @@ static void do_region(int op, int op_flags, unsigned region,
         struct request_queue *q = bdev_get_queue(where->bdev);
         sector_t num_sectors;
         unsigned int special_cmd_max_sectors;
+        const enum req_op op = opf & REQ_OP_MASK;
         /*
          * Reject unsupported discard and write same requests.
@@ -339,8 +340,8 @@ static void do_region(int op, int op_flags, unsigned region,
                           (PAGE_SIZE >> SECTOR_SHIFT)));
                 }
-                bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags,
-                                       GFP_NOIO, &io->client->bios);
+                bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
+                                       &io->client->bios);
                 bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
                 bio->bi_end_io = endio;
                 store_io_and_region_in_bio(bio, io, region);
@@ -368,7 +369,7 @@ static void do_region(int op, int op_flags, unsigned region,
         } while (remaining);
 }
-static void dispatch_io(int op, int op_flags, unsigned int num_regions,
+static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
                         struct dm_io_region *where, struct dpages *dp,
                         struct io *io, int sync)
 {
@@ -378,7 +379,7 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions,
         BUG_ON(num_regions > DM_IO_MAX_REGIONS);
         if (sync)
-                op_flags |= REQ_SYNC;
+                opf |= REQ_SYNC;
         /*
          * For multiple regions we need to be careful to rewind
@@ -386,8 +387,8 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions,
          */
         for (i = 0; i < num_regions; i++) {
                 *dp = old_pages;
-                if (where[i].count || (op_flags & REQ_PREFLUSH))
-                        do_region(op, op_flags, i, where + i, dp, io);
+                if (where[i].count || (opf & REQ_PREFLUSH))
+                        do_region(opf, i, where + i, dp, io);
         }
         /*
@@ -411,13 +412,13 @@ static void sync_io_complete(unsigned long error, void *context)
 }
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
-                   struct dm_io_region *where, int op, int op_flags,
-                   struct dpages *dp, unsigned long *error_bits)
+                   struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
+                   unsigned long *error_bits)
 {
         struct io *io;
         struct sync_io sio;
-        if (num_regions > 1 && !op_is_write(op)) {
+        if (num_regions > 1 && !op_is_write(opf)) {
                 WARN_ON(1);
                 return -EIO;
         }
@@ -434,7 +435,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
         io->vma_invalidate_address = dp->vma_invalidate_address;
         io->vma_invalidate_size = dp->vma_invalidate_size;
-        dispatch_io(op, op_flags, num_regions, where, dp, io, 1);
+        dispatch_io(opf, num_regions, where, dp, io, 1);
         wait_for_completion_io(&sio.wait);
@@ -445,12 +446,12 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 }
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
-                    struct dm_io_region *where, int op, int op_flags,
+                    struct dm_io_region *where, blk_opf_t opf,
                     struct dpages *dp, io_notify_fn fn, void *context)
 {
         struct io *io;
-        if (num_regions > 1 && !op_is_write(op)) {
+        if (num_regions > 1 && !op_is_write(opf)) {
                 WARN_ON(1);
                 fn(1, context);
                 return -EIO;
@@ -466,7 +467,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
         io->vma_invalidate_address = dp->vma_invalidate_address;
         io->vma_invalidate_size = dp->vma_invalidate_size;
-        dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
+        dispatch_io(opf, num_regions, where, dp, io, 0);
         return 0;
 }
@@ -489,7 +490,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
         case DM_IO_VMA:
                 flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
-                if (io_req->bi_op == REQ_OP_READ) {
+                if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
                         dp->vma_invalidate_address = io_req->mem.ptr.vma;
                         dp->vma_invalidate_size = size;
                 }
@@ -519,11 +520,10 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
         if (!io_req->notify.fn)
                 return sync_io(io_req->client, num_regions, where,
-                               io_req->bi_op, io_req->bi_op_flags, &dp,
-                               sync_error_bits);
+                               io_req->bi_opf, &dp, sync_error_bits);
-        return async_io(io_req->client, num_regions, where, io_req->bi_op,
-                        io_req->bi_op_flags, &dp, io_req->notify.fn,
+        return async_io(io_req->client, num_regions, where,
+                        io_req->bi_opf, &dp, io_req->notify.fn,
                         io_req->notify.context);
 }
 EXPORT_SYMBOL(dm_io);
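dm_io() still chooses the synchronous path when notify.fn is NULL, and dispatch_io() ORs REQ_SYNC into the combined opf on that path. A hedged caller sketch; it assumes a populated client and region and is not from the patch:

    #include <linux/dm-io.h>

    static int read_region_sync(struct dm_io_client *client,
                                struct dm_io_region *where, void *vma)
    {
            struct dm_io_request io_req = {
                    .bi_opf = REQ_OP_READ,  /* REQ_SYNC is ORed in internally */
                    .mem.type = DM_IO_VMA,
                    .mem.ptr.vma = vma,
                    .notify.fn = NULL,      /* NULL: dm_io() waits for completion */
                    .client = client,
            };

            return dm_io(&io_req, 1, where, NULL);
    }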
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 37b03ab7e5c9..9c8f3544e99d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -350,9 +350,9 @@ struct kcopyd_job {
         unsigned long write_err;
         /*
-         * Either READ or WRITE
+         * REQ_OP_READ, REQ_OP_WRITE or REQ_OP_WRITE_ZEROES.
          */
-        int rw;
+        enum req_op op;
         struct dm_io_region source;
         /*
@@ -418,7 +418,8 @@ static struct kcopyd_job *pop_io_job(struct list_head *jobs,
          * constraint and sequential writes that are at the right position.
          */
         list_for_each_entry(job, jobs, list) {
-                if (job->rw == READ || !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
+                if (job->op == REQ_OP_READ ||
+                    !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
                         list_del(&job->list);
                         return job;
                 }
@@ -518,7 +519,7 @@ static void complete_io(unsigned long error, void *context)
         io_job_finish(kc->throttle);
         if (error) {
-                if (op_is_write(job->rw))
+                if (op_is_write(job->op))
                         job->write_err |= error;
                 else
                         job->read_err = 1;
@@ -530,11 +531,11 @@ static void complete_io(unsigned long error, void *context)
                 }
         }
-        if (op_is_write(job->rw))
+        if (op_is_write(job->op))
                 push(&kc->complete_jobs, job);
         else {
-                job->rw = WRITE;
+                job->op = REQ_OP_WRITE;
                 push(&kc->io_jobs, job);
         }
@@ -549,8 +550,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
         int r;
         struct dm_io_request io_req = {
-                .bi_op = job->rw,
-                .bi_op_flags = 0,
+                .bi_opf = job->op,
                 .mem.type = DM_IO_PAGE_LIST,
                 .mem.ptr.pl = job->pages,
                 .mem.offset = 0,
@@ -571,7 +571,7 @@ static int run_io_job(struct kcopyd_job *job)
         io_job_start(job->kc->throttle);
-        if (job->rw == READ)
+        if (job->op == REQ_OP_READ)
                 r = dm_io(&io_req, 1, &job->source, NULL);
         else
                 r = dm_io(&io_req, job->num_dests, job->dests, NULL);
@@ -614,7 +614,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
                 if (r < 0) {
                         /* error this rogue job */
-                        if (op_is_write(job->rw))
+                        if (op_is_write(job->op))
                                 job->write_err = (unsigned long) -1L;
                         else
                                 job->read_err = 1;
@@ -817,7 +817,7 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
         if (from) {
                 job->source = *from;
                 job->pages = NULL;
-                job->rw = READ;
+                job->op = REQ_OP_READ;
         } else {
                 memset(&job->source, 0, sizeof job->source);
                 job->source.count = job->dests[0].count;
@@ -826,10 +826,10 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
                 /*
                  * Use WRITE ZEROES to optimize zeroing if all dests support it.
                  */
-                job->rw = REQ_OP_WRITE_ZEROES;
+                job->op = REQ_OP_WRITE_ZEROES;
                 for (i = 0; i < job->num_dests; i++)
                         if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) {
-                                job->rw = WRITE;
+                                job->op = REQ_OP_WRITE;
                                 break;
                         }
         }
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 0c6620e7b7bf..cf10fa667797 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -291,10 +291,9 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis
         core->nr_regions = le64_to_cpu(disk->nr_regions);
 }
-static int rw_header(struct log_c *lc, int op)
+static int rw_header(struct log_c *lc, enum req_op op)
 {
-        lc->io_req.bi_op = op;
-        lc->io_req.bi_op_flags = 0;
+        lc->io_req.bi_opf = op;
         return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
 }
@@ -307,8 +306,7 @@ static int flush_header(struct log_c *lc)
                 .count = 0,
         };
-        lc->io_req.bi_op = REQ_OP_WRITE;
-        lc->io_req.bi_op_flags = REQ_PREFLUSH;
+        lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
         return dm_io(&lc->io_req, 1, &null_location, NULL);
 }
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 80c9f7134e9b..66486b14ec33 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2038,7 +2038,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
         rdev->sb_loaded = 0;
-        if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) {
+        if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) {
                 DMERR("Failed to read superblock of device at position %d",
                       rdev->raid_disk);
                 md_error(rdev->mddev, rdev);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 8811d484fdd1..06a38dc32025 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -260,8 +260,7 @@ static int mirror_flush(struct dm_target *ti)
         struct dm_io_region io[MAX_NR_MIRRORS];
         struct mirror *m;
         struct dm_io_request io_req = {
-                .bi_op = REQ_OP_WRITE,
-                .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+                .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
                 .mem.type = DM_IO_KMEM,
                 .mem.ptr.addr = NULL,
                 .client = ms->io_client,
@@ -535,8 +534,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
 {
         struct dm_io_region io;
         struct dm_io_request io_req = {
-                .bi_op = REQ_OP_READ,
-                .bi_op_flags = 0,
+                .bi_opf = REQ_OP_READ,
                 .mem.type = DM_IO_BIO,
                 .mem.ptr.bio = bio,
                 .notify.fn = read_callback,
@@ -648,9 +646,9 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
         unsigned int i;
         struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
         struct mirror *m;
+        blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
         struct dm_io_request io_req = {
-                .bi_op = REQ_OP_WRITE,
-                .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
+                .bi_opf = REQ_OP_WRITE | op_flags,
                 .mem.type = DM_IO_BIO,
                 .mem.ptr.bio = bio,
                 .notify.fn = write_callback,
@@ -659,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
         };
         if (bio_op(bio) == REQ_OP_DISCARD) {
-                io_req.bi_op = REQ_OP_DISCARD;
+                io_req.bi_opf = REQ_OP_DISCARD | op_flags;
                 io_req.mem.type = DM_IO_KMEM;
                 io_req.mem.ptr.addr = NULL;
         }
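kcopyd now tracks the pending operation as enum req_op, and its zeroing path picks REQ_OP_WRITE_ZEROES only if every destination supports it, falling back to plain writes otherwise. A sketch of that probe; the helper and its signature are illustrative:

    #include <linux/blkdev.h>

    static enum req_op pick_zeroing_op(struct block_device **dests, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (!bdev_write_zeroes_sectors(dests[i]))
                            return REQ_OP_WRITE;    /* fall back: write zero pages */
            return REQ_OP_WRITE_ZEROES;
    }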
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3bb5cff5d6fc..f46f930eedf9 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -226,8 +226,8 @@ static void do_metadata(struct work_struct *work)
 /*
  * Read or write a chunk aligned and sized block of data from a device.
  */
-static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
-                    int op_flags, int metadata)
+static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
+                    int metadata)
 {
         struct dm_io_region where = {
                 .bdev = dm_snap_cow(ps->store->snap)->bdev,
@@ -235,8 +235,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
                 .count = ps->store->chunk_size,
         };
         struct dm_io_request io_req = {
-                .bi_op = op,
-                .bi_op_flags = op_flags,
+                .bi_opf = opf,
                 .mem.type = DM_IO_VMA,
                 .mem.ptr.vma = area,
                 .client = ps->io_client,
@@ -282,11 +281,11 @@ static void skip_metadata(struct pstore *ps)
  * Read or write a metadata area.  Remembering to skip the first
  * chunk which holds the header.
  */
-static int area_io(struct pstore *ps, int op, int op_flags)
+static int area_io(struct pstore *ps, blk_opf_t opf)
 {
         chunk_t chunk = area_location(ps, ps->current_area);
-        return chunk_io(ps, ps->area, chunk, op, op_flags, 0);
+        return chunk_io(ps, ps->area, chunk, opf, 0);
 }
 static void zero_memory_area(struct pstore *ps)
@@ -297,7 +296,7 @@ static void zero_memory_area(struct pstore *ps)
 static int zero_disk_area(struct pstore *ps, chunk_t area)
 {
         return chunk_io(ps, ps->zero_area, area_location(ps, area),
-                        REQ_OP_WRITE, 0, 0);
+                        REQ_OP_WRITE, 0);
 }
 static int read_header(struct pstore *ps, int *new_snapshot)
@@ -329,7 +328,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
         if (r)
                 return r;
-        r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1);
+        r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 1);
         if (r)
                 goto bad;
@@ -390,7 +389,7 @@ static int write_header(struct pstore *ps)
         dh->version = cpu_to_le32(ps->version);
         dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
-        return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1);
+        return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 1);
 }
 /*
@@ -734,8 +733,8 @@ static void persistent_commit_exception(struct dm_exception_store *store,
         /*
          * Commit exceptions to disk.
          */
-        if (ps->valid && area_io(ps, REQ_OP_WRITE,
-                                 REQ_PREFLUSH | REQ_FUA | REQ_SYNC))
+        if (ps->valid && area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA |
+                                 REQ_SYNC))
                 ps->valid = 0;
         /*
@@ -775,7 +774,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
                 return 0;
         ps->current_area--;
-        r = area_io(ps, REQ_OP_READ, 0);
+        r = area_io(ps, REQ_OP_READ);
         if (r < 0)
                 return r;
         ps->current_committed = ps->exceptions_per_area;
@@ -812,7 +811,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
         for (i = 0; i < nr_merged; i++)
                 clear_exception(ps, ps->current_committed - 1 - i);
-        r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
+        r = area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
         if (r < 0)
                 return r;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index bd539afbfe88..df904b7e95ce 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1620,13 +1620,11 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
 static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
                                            sector_t start, sector_t len, void *data)
 {
-        struct request_queue *q = bdev_get_queue(dev->bdev);
         unsigned int *zone_sectors = data;
-        if (!blk_queue_is_zoned(q))
+        if (!bdev_is_zoned(dev->bdev))
                 return 0;
-
-        return blk_queue_zone_sectors(q) != *zone_sectors;
+        return bdev_zone_sectors(dev->bdev) != *zone_sectors;
 }
 /*
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index d74c5a7a0ab4..2b994b3e22a7 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -523,8 +523,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
                 region.sector += wc->start_sector;
                 atomic_inc(&endio.count);
-                req.bi_op = REQ_OP_WRITE;
-                req.bi_op_flags = REQ_SYNC;
+                req.bi_opf = REQ_OP_WRITE | REQ_SYNC;
                 req.mem.type = DM_IO_VMA;
                 req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
                 req.client = wc->dm_io;
@@ -562,8 +561,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
         region.sector += wc->start_sector;
-        req.bi_op = REQ_OP_WRITE;
-        req.bi_op_flags = REQ_SYNC | REQ_FUA;
+        req.bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
         req.mem.type = DM_IO_VMA;
         req.mem.ptr.vma = (char *)wc->memory_map;
         req.client = wc->dm_io;
@@ -592,8 +590,7 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
         region.bdev = dev->bdev;
         region.sector = 0;
         region.count = 0;
-        req.bi_op = REQ_OP_WRITE;
-        req.bi_op_flags = REQ_PREFLUSH;
+        req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
         req.mem.type = DM_IO_KMEM;
         req.mem.ptr.addr = NULL;
         req.client = wc->dm_io;
@@ -981,8 +978,7 @@ static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors
         region.bdev = wc->ssd_dev->bdev;
         region.sector = wc->start_sector;
         region.count = n_sectors;
-        req.bi_op = REQ_OP_READ;
-        req.bi_op_flags = REQ_SYNC;
+        req.bi_opf = REQ_OP_READ | REQ_SYNC;
         req.mem.type = DM_IO_VMA;
         req.mem.ptr.vma = (char *)wc->memory_map;
         req.client = wc->dm_io;
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 3e7b1fe1580b..4d10f302c62e 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -139,13 +139,11 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
 void dm_cleanup_zoned_dev(struct mapped_device *md)
 {
-        struct request_queue *q = md->queue;
-
-        if (q) {
-                kfree(q->conv_zones_bitmap);
-                q->conv_zones_bitmap = NULL;
-                kfree(q->seq_zones_wlock);
-                q->seq_zones_wlock = NULL;
+        if (md->disk) {
+                kfree(md->disk->conv_zones_bitmap);
+                md->disk->conv_zones_bitmap = NULL;
+                kfree(md->disk->seq_zones_wlock);
+                md->disk->seq_zones_wlock = NULL;
         }
         kvfree(md->zwp_offset);
@@ -179,31 +177,31 @@ static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
                                  void *data)
 {
         struct mapped_device *md = data;
-        struct request_queue *q = md->queue;
+        struct gendisk *disk = md->disk;
         switch (zone->type) {
         case BLK_ZONE_TYPE_CONVENTIONAL:
-                if (!q->conv_zones_bitmap) {
-                        q->conv_zones_bitmap =
-                                kcalloc(BITS_TO_LONGS(q->nr_zones),
+                if (!disk->conv_zones_bitmap) {
+                        disk->conv_zones_bitmap =
+                                kcalloc(BITS_TO_LONGS(disk->nr_zones),
                                         sizeof(unsigned long), GFP_NOIO);
-                        if (!q->conv_zones_bitmap)
+                        if (!disk->conv_zones_bitmap)
                                 return -ENOMEM;
                 }
-                set_bit(idx, q->conv_zones_bitmap);
+                set_bit(idx, disk->conv_zones_bitmap);
                 break;
         case BLK_ZONE_TYPE_SEQWRITE_REQ:
         case BLK_ZONE_TYPE_SEQWRITE_PREF:
-                if (!q->seq_zones_wlock) {
-                        q->seq_zones_wlock =
-                                kcalloc(BITS_TO_LONGS(q->nr_zones),
+                if (!disk->seq_zones_wlock) {
+                        disk->seq_zones_wlock =
+                                kcalloc(BITS_TO_LONGS(disk->nr_zones),
                                         sizeof(unsigned long), GFP_NOIO);
-                        if (!q->seq_zones_wlock)
+                        if (!disk->seq_zones_wlock)
                                 return -ENOMEM;
                 }
                 if (!md->zwp_offset) {
                         md->zwp_offset =
-                                kvcalloc(q->nr_zones, sizeof(unsigned int),
+                                kvcalloc(disk->nr_zones, sizeof(unsigned int),
                                          GFP_KERNEL);
                         if (!md->zwp_offset)
                                 return -ENOMEM;
@@ -228,7 +226,7 @@ static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
  */
 static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
 {
-        struct request_queue *q = md->queue;
+        struct gendisk *disk = md->disk;
         unsigned int noio_flag;
         int ret;
@@ -236,7 +234,7 @@ static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
         * Check if something changed. If yes, cleanup the current resources
         * and reallocate everything.
         */
-        if (!q->nr_zones || q->nr_zones != md->nr_zones)
+        if (!disk->nr_zones || disk->nr_zones != md->nr_zones)
                 dm_cleanup_zoned_dev(md);
         if (md->nr_zones)
                 return 0;
@@ -246,17 +244,17 @@ static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
         * operations in this context are done as if GFP_NOIO was specified.
         */
        noio_flag = memalloc_noio_save();
-        ret = dm_blk_do_report_zones(md, t, 0, q->nr_zones,
+        ret = dm_blk_do_report_zones(md, t, 0, disk->nr_zones,
                                      dm_zone_revalidate_cb, md);
        memalloc_noio_restore(noio_flag);
        if (ret < 0)
                goto err;
-        if (ret != q->nr_zones) {
+        if (ret != disk->nr_zones) {
                ret = -EIO;
                goto err;
        }
-        md->nr_zones = q->nr_zones;
+        md->nr_zones = disk->nr_zones;
        return 0;
@@ -270,7 +268,7 @@ static int device_not_zone_append_capable(struct dm_target *ti,
                                          struct dm_dev *dev, sector_t start,
                                          sector_t len, void *data)
 {
-        return !blk_queue_is_zoned(bdev_get_queue(dev->bdev));
+        return !bdev_is_zoned(dev->bdev);
 }
 static bool dm_table_supports_zone_append(struct dm_table *t)
@@ -301,7 +299,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
         * correct value to be exposed in sysfs queue/nr_zones.
         */
        WARN_ON_ONCE(queue_is_mq(q));
-        q->nr_zones = blkdev_nr_zones(md->disk);
+        md->disk->nr_zones = bdev_nr_zones(md->disk->part0);
        /* Check if zone append is natively supported */
        if (dm_table_supports_zone_append(t)) {
@@ -334,7 +332,7 @@ static int dm_update_zone_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
 static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
                                     unsigned int *wp_ofst)
 {
-        sector_t sector = zno * blk_queue_zone_sectors(md->queue);
+        sector_t sector = zno * bdev_zone_sectors(md->disk->part0);
         unsigned int noio_flag;
         struct dm_table *t;
         int srcu_idx, ret;
@@ -361,7 +359,7 @@ static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
 }
 struct orig_bio_details {
-        unsigned int op;
+        enum req_op op;
         unsigned int nr_sectors;
 };
@@ -373,7 +371,7 @@ struct orig_bio_details {
 static bool dm_zone_map_bio_begin(struct mapped_device *md, unsigned int zno,
                                   struct bio *clone)
 {
-        sector_t zsectors = blk_queue_zone_sectors(md->queue);
+        sector_t zsectors = bdev_zone_sectors(md->disk->part0);
         unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
         /*
@@ -443,7 +441,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int z
                 return BLK_STS_OK;
         case REQ_OP_ZONE_FINISH:
                 WRITE_ONCE(md->zwp_offset[zno],
-                           blk_queue_zone_sectors(md->queue));
+                           bdev_zone_sectors(md->disk->part0));
                 return BLK_STS_OK;
         case REQ_OP_WRITE_ZEROES:
         case REQ_OP_WRITE:
@@ -466,26 +464,26 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int z
         }
 }
-static inline void dm_zone_lock(struct request_queue *q,
-                                unsigned int zno, struct bio *clone)
+static inline void dm_zone_lock(struct gendisk *disk, unsigned int zno,
+                                struct bio *clone)
 {
         if (WARN_ON_ONCE(bio_flagged(clone, BIO_ZONE_WRITE_LOCKED)))
                 return;
-        wait_on_bit_lock_io(q->seq_zones_wlock, zno, TASK_UNINTERRUPTIBLE);
+        wait_on_bit_lock_io(disk->seq_zones_wlock, zno, TASK_UNINTERRUPTIBLE);
         bio_set_flag(clone, BIO_ZONE_WRITE_LOCKED);
 }
-static inline void dm_zone_unlock(struct request_queue *q,
-                                  unsigned int zno, struct bio *clone)
+static inline void dm_zone_unlock(struct gendisk *disk, unsigned int zno,
+                                  struct bio *clone)
 {
         if (!bio_flagged(clone, BIO_ZONE_WRITE_LOCKED))
                 return;
-        WARN_ON_ONCE(!test_bit(zno, q->seq_zones_wlock));
-        clear_bit_unlock(zno, q->seq_zones_wlock);
+        WARN_ON_ONCE(!test_bit(zno, disk->seq_zones_wlock));
+        clear_bit_unlock(zno, disk->seq_zones_wlock);
         smp_mb__after_atomic();
-        wake_up_bit(q->seq_zones_wlock, zno);
+        wake_up_bit(disk->seq_zones_wlock, zno);
         bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED);
 }
@@ -520,7 +518,6 @@ int dm_zone_map_bio(struct dm_target_io *tio)
         struct dm_io *io = tio->io;
         struct dm_target *ti = tio->ti;
         struct mapped_device *md = io->md;
-        struct request_queue *q = md->queue;
         struct bio *clone = &tio->clone;
         struct orig_bio_details orig_bio_details;
         unsigned int zno;
@@ -536,7 +533,7 @@ int dm_zone_map_bio(struct dm_target_io *tio)
         /* Lock the target zone */
         zno = bio_zone_no(clone);
-        dm_zone_lock(q, zno, clone);
+        dm_zone_lock(md->disk, zno, clone);
         orig_bio_details.nr_sectors = bio_sectors(clone);
         orig_bio_details.op = bio_op(clone);
@@ -546,7 +543,7 @@ int dm_zone_map_bio(struct dm_target_io *tio)
         * both valid, and if the bio is a zone append, remap it to a write.
         */
        if (!dm_zone_map_bio_begin(md, zno, clone)) {
-                dm_zone_unlock(q, zno, clone);
+                dm_zone_unlock(md->disk, zno, clone);
                return DM_MAPIO_KILL;
        }
@@ -570,12 +567,12 @@ int dm_zone_map_bio(struct dm_target_io *tio)
                sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
                                          *tio->len_ptr);
                if (sts != BLK_STS_OK)
-                        dm_zone_unlock(q, zno, clone);
+                        dm_zone_unlock(md->disk, zno, clone);
                break;
        case DM_MAPIO_REQUEUE:
        case DM_MAPIO_KILL:
        default:
-                dm_zone_unlock(q, zno, clone);
+                dm_zone_unlock(md->disk, zno, clone);
                sts = BLK_STS_IOERR;
                break;
        }
@@ -592,7 +589,7 @@ int dm_zone_map_bio(struct dm_target_io *tio)
 void dm_zone_endio(struct dm_io *io, struct bio *clone)
 {
        struct mapped_device *md = io->md;
-        struct request_queue *q = md->queue;
+        struct gendisk *disk = md->disk;
        struct bio *orig_bio = io->orig_bio;
        unsigned int zwp_offset;
        unsigned int zno;
@@ -608,7 +605,8 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
         */
        if (clone->bi_status == BLK_STS_OK &&
            bio_op(clone) == REQ_OP_ZONE_APPEND) {
-                sector_t mask = (sector_t)blk_queue_zone_sectors(q) - 1;
+                sector_t mask =
+                        (sector_t)bdev_zone_sectors(disk->part0) - 1;
                orig_bio->bi_iter.bi_sector +=
                        clone->bi_iter.bi_sector & mask;
@@ -649,5 +647,5 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
                        zwp_offset - bio_sectors(orig_bio);
        }
-        dm_zone_unlock(q, zno, clone);
+        dm_zone_unlock(disk, zno, clone);
 }
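The dm-zone.c hunks belong to a second strand of this series: per-zone state (nr_zones, conv_zones_bitmap, seq_zones_wlock) migrates from the request_queue into struct gendisk, and the queue-based helpers give way to bdev_is_zoned(), bdev_zone_sectors() and bdev_nr_zones(). A sketch of the allocation step against the new home; the helper is hypothetical:

    #include <linux/blkdev.h>
    #include <linux/slab.h>

    static int alloc_conv_bitmap(struct gendisk *disk)
    {
            if (disk->conv_zones_bitmap)
                    return 0;
            disk->conv_zones_bitmap = kcalloc(BITS_TO_LONGS(disk->nr_zones),
                                              sizeof(unsigned long), GFP_NOIO);
            return disk->conv_zones_bitmap ? 0 : -ENOMEM;
    }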
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index d1ea66114d14..34db364c23a8 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -737,7 +737,7 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
 /*
  * Read/write a metadata block.
  */
-static int dmz_rdwr_block(struct dmz_dev *dev, int op,
+static int dmz_rdwr_block(struct dmz_dev *dev, enum req_op op,
                           sector_t block, struct page *page)
 {
         struct bio *bio;
@@ -2045,7 +2045,8 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
  * allocated and used to map the chunk.
  * The zone returned will be set to the active state.
  */
-struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op)
+struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
+                                      unsigned int chunk, enum req_op op)
 {
         struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
         struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 0ec5d8b9b1a4..95b132b52f33 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -764,8 +764,7 @@ static void dmz_put_zoned_device(struct dm_target *ti)
 static int dmz_fixup_devices(struct dm_target *ti)
 {
         struct dmz_target *dmz = ti->private;
-        struct dmz_dev *reg_dev, *zoned_dev;
-        struct request_queue *q;
+        struct dmz_dev *reg_dev = NULL;
         sector_t zone_nr_sectors = 0;
         int i;
@@ -780,32 +779,32 @@ static int dmz_fixup_devices(struct dm_target *ti)
                         return -EINVAL;
                 }
                 for (i = 1; i < dmz->nr_ddevs; i++) {
-                        zoned_dev = &dmz->dev[i];
+                        struct dmz_dev *zoned_dev = &dmz->dev[i];
+                        struct block_device *bdev = zoned_dev->bdev;
+
                         if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
                                 ti->error = "Secondary disk is not a zoned device";
                                 return -EINVAL;
                         }
-                        q = bdev_get_queue(zoned_dev->bdev);
                         if (zone_nr_sectors &&
-                            zone_nr_sectors != blk_queue_zone_sectors(q)) {
+                            zone_nr_sectors != bdev_zone_sectors(bdev)) {
                                 ti->error = "Zone nr sectors mismatch";
                                 return -EINVAL;
                         }
-                        zone_nr_sectors = blk_queue_zone_sectors(q);
+                        zone_nr_sectors = bdev_zone_sectors(bdev);
                         zoned_dev->zone_nr_sectors = zone_nr_sectors;
-                        zoned_dev->nr_zones =
-                                blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+                        zoned_dev->nr_zones = bdev_nr_zones(bdev);
                 }
         } else {
-                reg_dev = NULL;
-                zoned_dev = &dmz->dev[0];
+                struct dmz_dev *zoned_dev = &dmz->dev[0];
+                struct block_device *bdev = zoned_dev->bdev;
+
                 if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
                         ti->error = "Disk is not a zoned device";
                         return -EINVAL;
                 }
-                q = bdev_get_queue(zoned_dev->bdev);
-                zoned_dev->zone_nr_sectors = blk_queue_zone_sectors(q);
-                zoned_dev->nr_zones = blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+                zoned_dev->zone_nr_sectors = bdev_zone_sectors(bdev);
+                zoned_dev->nr_zones = bdev_nr_zones(bdev);
         }
         if (reg_dev) {
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index a02744a0846c..265494d3f711 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -248,7 +248,7 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
                                          unsigned int dev_idx, bool idle);
 struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
-                                      unsigned int chunk, int op);
+                                      unsigned int chunk, enum req_op op);
 void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *zone);
 struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
                                      struct dm_zone *dzone);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2b75f1ef7386..54c2a23f4e55 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -716,7 +716,7 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
 }
 static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
-                                                     int *srcu_idx, unsigned bio_opf)
+                                                     int *srcu_idx, blk_opf_t bio_opf)
 {
         if (bio_opf & REQ_NOWAIT)
                 return dm_get_live_table_fast(md);
@@ -725,7 +725,7 @@ static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
 }
 static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
-                                         unsigned bio_opf)
+                                         blk_opf_t bio_opf)
 {
         if (bio_opf & REQ_NOWAIT)
                 dm_put_live_table_fast(md);
@@ -1033,7 +1033,7 @@ static void clone_endio(struct bio *bio)
         }
         if (static_branch_unlikely(&zoned_enabled) &&
-            unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
+            unlikely(bdev_is_zoned(bio->bi_bdev)))
                 dm_zone_endio(io, bio);
         if (endio) {
@@ -1086,23 +1086,18 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
 {
         sector_t target_offset = dm_target_offset(ti, sector);
         sector_t len = max_io_len_target_boundary(ti, target_offset);
-        sector_t max_len;
         /*
          * Does the target need to split IO even further?
          * - varied (per target) IO splitting is a tenet of DM; this
          *   explains why stacked chunk_sectors based splitting via
-         *   blk_max_size_offset() isn't possible here. So pass in
-         *   ti->max_io_len to override stacked chunk_sectors.
+         *   blk_queue_split() isn't possible here.
          */
-        if (ti->max_io_len) {
-                max_len = blk_max_size_offset(ti->table->md->queue,
-                                              target_offset, ti->max_io_len);
-                if (len > max_len)
-                        len = max_len;
-        }
-
-        return len;
+        if (!ti->max_io_len)
+                return len;
+        return min_t(sector_t, len,
+                     min(queue_max_sectors(ti->table->md->queue),
+                         blk_chunk_sectors_left(target_offset, ti->max_io_len)));
 }
 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
@@ -1516,7 +1511,7 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
 static bool is_abnormal_io(struct bio *bio)
 {
-        unsigned int op = bio_op(bio);
+        enum req_op op = bio_op(bio);
         if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
                 switch (op) {
@@ -1547,6 +1542,8 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
         case REQ_OP_WRITE_ZEROES:
                 num_bios = ti->num_write_zeroes_bios;
                 break;
+        default:
+                break;
         }
         /*
@@ -1628,7 +1625,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
         * Only support bio polling for normal IO, and the target io is
         * exactly inside the dm_io instance (verified in dm_poll_dm_io)
         */
-        ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;
+        ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
        len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
        setup_split_accounting(ci, len);
@@ -1725,7 +1722,7 @@ static void dm_submit_bio(struct bio *bio)
        struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
        int srcu_idx;
        struct dm_table *map;
-        unsigned bio_opf = bio->bi_opf;
+        blk_opf_t bio_opf = bio->bi_opf;
        map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
@@ -1899,7 +1896,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
                        del_gendisk(md->disk);
                }
                dm_queue_destroy_crypto_profile(md->queue);
-                blk_cleanup_disk(md->disk);
+                put_disk(md->disk);
        }
        if (md->pending_io) {
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index d87f674ab762..bf6dffadbe6f 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -165,7 +165,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
                 if (sync_page_io(rdev, target,
                                  roundup(size, bdev_logical_block_size(rdev->bdev)),
-                                 page, REQ_OP_READ, 0, true)) {
+                                 page, REQ_OP_READ, true)) {
                         page->index = index;
                         return 0;
                 }
@@ -302,7 +302,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
                         atomic_inc(&bitmap->pending_writes);
                         set_buffer_locked(bh);
                         set_buffer_mapped(bh);
-                        submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+                        submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
                         bh = bh->b_this_page;
                 }
@@ -394,7 +394,7 @@ static int read_page(struct file *file, unsigned long index,
                         atomic_inc(&bitmap->pending_writes);
                         set_buffer_locked(bh);
                         set_buffer_mapped(bh);
-                        submit_bh(REQ_OP_READ, 0, bh);
+                        submit_bh(REQ_OP_READ, bh);
                 }
                 blk_cur++;
                 bh = bh->b_this_page;
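md-bitmap illustrates the buffer_head side of the conversion: submit_bh() now takes the combined opf as its first argument instead of an op plus flags. A hedged sketch; it assumes a buffer_head that is already mapped and up to date:

    #include <linux/buffer_head.h>

    static void write_bh_sync(struct buffer_head *bh)
    {
            lock_buffer(bh);
            /* op and flags travel together now: */
            submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
    }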
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c7ecb0bffda0..4df78e30b76a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -993,15 +993,15 @@ int md_super_wait(struct mddev *mddev)
 }
 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
-                 struct page *page, int op, int op_flags, bool metadata_op)
+                 struct page *page, blk_opf_t opf, bool metadata_op)
 {
         struct bio bio;
         struct bio_vec bvec;
         if (metadata_op && rdev->meta_bdev)
-                bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags);
+                bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
         else
-                bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags);
+                bio_init(&bio, rdev->bdev, &bvec, 1, opf);
         if (metadata_op)
                 bio.bi_iter.bi_sector = sector + rdev->sb_start;
@@ -1024,7 +1024,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
         if (rdev->sb_loaded)
                 return 0;
-        if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
+        if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true))
                 goto fail;
         rdev->sb_loaded = 1;
         return 0;
@@ -1722,7 +1722,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
                         return -EINVAL;
                 bb_sector = (long long)offset;
                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
-                                  rdev->bb_page, REQ_OP_READ, 0, true))
+                                  rdev->bb_page, REQ_OP_READ, true))
                         return -EIO;
                 bbp = (__le64 *)page_address(rdev->bb_page);
                 rdev->badblocks.shift = sb->bblog_shift;
@@ -2438,7 +2438,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
                         mdname(mddev), mddev->max_disks);
                 return -EBUSY;
         }
-        bdevname(rdev->bdev,b);
+        snprintf(b, sizeof(b), "%pg", rdev->bdev);
         strreplace(b, '/', '!');
         rdev->mddev = mddev;
@@ -5579,7 +5579,7 @@ static void md_free(struct kobject *ko)
         if (mddev->gendisk) {
                 del_gendisk(mddev->gendisk);
-                blk_cleanup_disk(mddev->gendisk);
+                put_disk(mddev->gendisk);
         }
         percpu_ref_exit(&mddev->writes_pending);
@@ -5718,7 +5718,7 @@ static int md_alloc(dev_t dev, char *name)
 out_del_gendisk:
         del_gendisk(disk);
 out_cleanup_disk:
-        blk_cleanup_disk(disk);
+        put_disk(disk);
 out_unlock_disks_mutex:
         mutex_unlock(&disks_mutex);
         mddev_put(mddev);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index cf2cbb17acbd..b4f84b27bdef 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -738,8 +738,7 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
                            sector_t sector, int size, struct page *page);
 extern int md_super_wait(struct mddev *mddev);
 extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
-                        struct page *page, int op, int op_flags,
-                        bool metadata_op);
+                        struct page *page, blk_opf_t opf, bool metadata_op);
 extern void md_do_sync(struct md_thread *thread);
 extern void md_new_event(void);
 extern void md_allow_write(struct mddev *mddev);
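sync_page_io() loses its separate flags argument across md; callers simply OR the flags into the opf, as the raid5-cache hunks further down show. A hedged usage sketch; the helper name is hypothetical and the include assumes a drivers/md build context:

    #include "md.h"     /* for sync_page_io() and struct md_rdev */

    static int write_sb_fua(struct md_rdev *rdev, struct page *page)
    {
            if (!sync_page_io(rdev, 0, PAGE_SIZE, page,
                              REQ_OP_WRITE | REQ_SYNC | REQ_FUA, true))
                    return -EIO;
            return 0;
    }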
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 258d4eb2d63c..05d8438cfec8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1220,8 +1220,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 	struct raid1_info *mirror;
 	struct bio *read_bio;
 	struct bitmap *bitmap = mddev->bitmap;
-	const int op = bio_op(bio);
-	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+	const enum req_op op = bio_op(bio);
+	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
 	int max_sectors;
 	int rdisk;
 	bool r1bio_existed = !!r1_bio;
@@ -1240,7 +1240,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 		rcu_read_lock();
 		rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
 		if (rdev)
-			bdevname(rdev->bdev, b);
+			snprintf(b, sizeof(b), "%pg", rdev->bdev);
 		else
 			strcpy(b, "???");
 		rcu_read_unlock();
@@ -1988,9 +1988,9 @@ static void end_sync_write(struct bio *bio)
 }
 
 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
-			    int sectors, struct page *page, int rw)
+			   int sectors, struct page *page, int rw)
 {
-	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
+	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
 		/* success */
 		return 1;
 	if (rw == WRITE) {
@@ -2057,7 +2057,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 				rdev = conf->mirrors[d].rdev;
 				if (sync_page_io(rdev, sect, s<<9,
 						 pages[idx],
-						 REQ_OP_READ, 0, false)) {
+						 REQ_OP_READ, false)) {
 					success = 1;
 					break;
 				}
@@ -2305,7 +2305,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 			if (sync_page_io(rdev, sect, s<<9,
-					 conf->tmppage, REQ_OP_READ, 0, false))
+					 conf->tmppage, REQ_OP_READ, false))
 				success = 1;
 			rdev_dec_pending(rdev, mddev);
 			if (success)
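Note: in the raid1.c hunks above, bio_op() results move from plain int to enum req_op, and flag masks move from unsigned long to blk_opf_t. The two halves can be recombined into a valid bi_opf because the operation occupies the low bits of the field. A sketch of that round trip (hypothetical helper, not part of the patch):

	/* Sketch: split a bio's bi_opf into typed pieces, then recombine. */
	static blk_opf_t example_clone_opf(struct bio *bio)
	{
		const enum req_op op = bio_op(bio);	/* bi_opf & REQ_OP_MASK */
		const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;

		return op | do_sync;
	}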
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d589f823feb1..26545950ca42 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1136,8 +1136,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 {
 	struct r10conf *conf = mddev->private;
 	struct bio *read_bio;
-	const int op = bio_op(bio);
-	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+	const enum req_op op = bio_op(bio);
+	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
 	int max_sectors;
 	struct md_rdev *rdev;
 	char b[BDEVNAME_SIZE];
@@ -1164,7 +1164,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 		disk = r10_bio->devs[slot].devnum;
 		err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
 		if (err_rdev)
-			bdevname(err_rdev->bdev, b);
+			snprintf(b, sizeof(b), "%pg", err_rdev->bdev);
 		else {
 			strcpy(b, "???");
 			/* This never gets dereferenced */
@@ -1230,9 +1230,9 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 				  struct bio *bio, bool replacement,
 				  int n_copy)
 {
-	const int op = bio_op(bio);
-	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
-	const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
+	const enum req_op op = bio_op(bio);
+	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
+	const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
 	unsigned long flags;
 	struct blk_plug_cb *cb;
 	struct raid1_plug_cb *plug = NULL;
@@ -2512,7 +2512,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
 				  addr,
 				  s << 9,
 				  pages[idx],
-				  REQ_OP_READ, 0, false);
+				  REQ_OP_READ, false);
 		if (ok) {
 			rdev = conf->mirrors[dw].rdev;
 			addr = r10_bio->devs[1].addr + sect;
@@ -2520,7 +2520,7 @@
 					  addr,
 					  s << 9,
 					  pages[idx],
-					  REQ_OP_WRITE, 0, false);
+					  REQ_OP_WRITE, false);
 			if (!ok) {
 				set_bit(WriteErrorSeen, &rdev->flags);
 				if (!test_and_set_bit(WantReplacement,
@@ -2644,7 +2644,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
 	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
 	    && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
 		return -1;
-	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
+	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
 		/* success */
 		return 1;
 	if (rw == WRITE) {
@@ -2726,7 +2726,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 				       sect,
 				       s<<9,
 				       conf->tmppage,
-				       REQ_OP_READ, 0, false);
+				       REQ_OP_READ, false);
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
 			if (success)
@@ -5107,7 +5107,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
 					       addr,
 					       s << 9,
 					       pages[idx],
-					       REQ_OP_READ, 0, false);
+					       REQ_OP_READ, false);
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
 			if (success)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 83c184eddbda..6f2dd73128b0 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1788,7 +1788,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
 	mb = page_address(page);
 	mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, mb,
 					     PAGE_SIZE));
-	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
+	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
 			  REQ_SYNC | REQ_FUA, false)) {
 		__free_page(page);
 		return -EIO;
@@ -1898,7 +1898,7 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf,
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 			sync_page_io(rdev, sh->sector, PAGE_SIZE,
-				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+				     sh->dev[disk_index].page, REQ_OP_WRITE,
 				     false);
 			rdev_dec_pending(rdev, rdev->mddev);
 			rcu_read_lock();
@@ -1908,7 +1908,7 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf,
 			atomic_inc(&rrdev->nr_pending);
 			rcu_read_unlock();
 			sync_page_io(rrdev, sh->sector, PAGE_SIZE,
-				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+				     sh->dev[disk_index].page, REQ_OP_WRITE,
 				     false);
 			rdev_dec_pending(rrdev, rrdev->mddev);
 			rcu_read_lock();
@@ -2394,7 +2394,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
 						  PAGE_SIZE));
 				kunmap_atomic(addr);
 				sync_page_io(log->rdev, write_pos, PAGE_SIZE,
-					     dev->page, REQ_OP_WRITE, 0, false);
+					     dev->page, REQ_OP_WRITE, false);
 				write_pos = r5l_ring_add(log, write_pos,
 							 BLOCK_SECTORS);
 				offset += sizeof(__le32) +
@@ -2406,7 +2406,7 @@
 		mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
 						     mb, PAGE_SIZE));
 		sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
-			     REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false);
+			     REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false);
 		sh->log_start = ctx->pos;
 		list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
 		atomic_inc(&log->stripe_in_journal_count);
@@ -2971,7 +2971,7 @@ static int r5l_load_log(struct r5l_log *log)
 	if (!page)
 		return -ENOMEM;
 
-	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
+	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) {
 		ret = -EIO;
 		goto ioerr;
 	}
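Note: the raid5-cache.c hunks show the call-site half of the conversion: flags that used to ride in sync_page_io()'s separate argument (REQ_SYNC | REQ_FUA) are now ORed into the operation itself. Code that receives such a combined value can still branch on the operation alone; a sketch with an assumed helper name:

	/* Sketch: REQ_OP_MASK recovers the operation from a combined blk_opf_t. */
	static bool example_opf_is_write(blk_opf_t opf)
	{
		return (opf & REQ_OP_MASK) == REQ_OP_WRITE;
	}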
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 0a2e4806b1ec..98988cb26295 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -897,7 +897,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
 		 __func__, indent, "", rdev->bdev,
 		 (unsigned long long)sector);
 	if (!sync_page_io(rdev, sector, block_size, page2,
-			  REQ_OP_READ, 0, false)) {
+			  REQ_OP_READ, false)) {
 		md_error(mddev, rdev);
 		pr_debug("%s:%*s read failed!\n", __func__,
 			 indent, "");
@@ -919,7 +919,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
 			 (unsigned long long)(ppl_sector + i));
 		if (!sync_page_io(log->rdev,
 				  ppl_sector - log->rdev->data_offset + i,
-				  block_size, page2, REQ_OP_READ, 0,
+				  block_size, page2, REQ_OP_READ,
 				  false)) {
 			pr_debug("%s:%*s read failed!\n", __func__,
 				 indent, "");
@@ -946,7 +946,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
 		 (unsigned long long)parity_sector,
 		 parity_rdev->bdev);
 	if (!sync_page_io(parity_rdev, parity_sector, block_size,
-			  page1, REQ_OP_WRITE, 0, false)) {
+			  page1, REQ_OP_WRITE, false)) {
 		pr_debug("%s:%*s parity write error!\n",
 			 __func__, indent, "");
 		md_error(mddev, parity_rdev);
@@ -998,7 +998,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
 		int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;
 
 		if (!sync_page_io(rdev, sector - rdev->data_offset,
-				  s, page, REQ_OP_READ, 0, false)) {
+				  s, page, REQ_OP_READ, false)) {
 			md_error(mddev, rdev);
 			ret = -EIO;
 			goto out;
@@ -1062,7 +1062,7 @@ static int ppl_write_empty_header(struct ppl_log *log)
 
 	if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
 			  PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
-			  REQ_FUA, 0, false)) {
+			  REQ_FUA, false)) {
 		md_error(rdev->mddev, rdev);
 		ret = -EIO;
 	}
@@ -1100,7 +1100,7 @@ static int ppl_load_distributed(struct ppl_log *log)
 		if (!sync_page_io(rdev,
 				  rdev->ppl.sector - rdev->data_offset +
 				  pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
-				  0, false)) {
+				  false)) {
 			md_error(mddev, rdev);
 			ret = -EIO;
 			/* if not able to read - don't recover any PPL */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c8539d0e12dd..5cabdbbac48b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1082,7 +1082,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 	should_defer = conf->batch_bio_dispatch && conf->group_cnt;
 
 	for (i = disks; i--; ) {
-		int op, op_flags = 0;
+		enum req_op op;
+		blk_opf_t op_flags = 0;
 		int replace_only = 0;
 		struct bio *bi, *rbi;
 		struct md_rdev *rdev, *rrdev = NULL;
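Note: the final raid5.c hunk keeps op and op_flags as separate locals in ops_run_io() but gives each its proper type; they are still ORed together when the bio is set up. A sketch of that pattern against the bio_init() prototype visible in the md.c hunk; the helper and its fua parameter are assumptions for illustration:

	/* Sketch: typed op plus typed flags, combined at bio setup time. */
	static void example_init_write_bio(struct bio *bio, struct block_device *bdev,
					   struct bio_vec *bvec, bool fua)
	{
		const enum req_op op = REQ_OP_WRITE;
		blk_opf_t op_flags = REQ_SYNC;

		if (fua)
			op_flags |= REQ_FUA;
		bio_init(bio, bdev, bvec, 1, op | op_flags);
	}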