author     Bart Van Assche <bart.vanassche@wdc.com>   2018-03-07 17:10:10 -0800
committer  Jens Axboe <axboe@kernel.dk>               2018-03-08 14:13:48 -0700
commit     8b904b5b6b58b9a29dcf3f82d936d9e7fd69fda6
tree       c5ba5ff20820c410a422e7461f400b0546111916
parent     bf3a2b310ea35ae2f641bb734892574bd820d4a5
block: Use blk_queue_flag_*() in drivers instead of queue_flag_*()
This patch has been generated as follows:
for verb in set_unlocked clear_unlocked set clear; do
replace-in-files queue_flag_${verb} blk_queue_flag_${verb%_unlocked} \
$(git grep -lw queue_flag_${verb} drivers block/bsg*)
done
Except for protecting all queue flag changes with the queue lock (see the
helper sketch after the diffstat below), this patch does not change any
functionality.
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Shaohua Li <shli@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
38 files changed, 89 insertions(+), 89 deletions(-)
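For context on the locking remark in the commit message: queue_flag_set_unlocked()
flipped the flag bit without holding q->queue_lock, whereas the new helpers take
the lock internally, so every flag update in the converted drivers is now
serialized. Below is a minimal sketch of the two helpers the diff converts
callers to, assuming the definitions introduced earlier in this series in
block/blk-core.c; the exact bodies in the tree may differ in detail.

	/*
	 * Sketch of the blk_queue_flag_*() helpers (assumed shape, based on
	 * the preceding patch in this series). Unlike the *_unlocked()
	 * variants they replace, these acquire q->queue_lock around the
	 * bit operation, so concurrent flag updates cannot race.
	 */
	void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		queue_flag_set(flag, q);	/* requires queue_lock held */
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		queue_flag_clear(flag, q);	/* requires queue_lock held */
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

Because the helpers take the lock themselves, the mechanical substitution in the
script above is safe for every call site, whether or not the caller already ran
under the queue lock's protection.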
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 1474153f73e3..b4fe1a48f111 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -275,8 +275,8 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 
 	q->queuedata = dev;
 	q->bsg_job_fn = job_fn;
-	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	blk_queue_softirq_done(q, bsg_softirq_done);
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index a12f77e6891e..b4f02768ba47 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1212,10 +1212,10 @@ static void decide_on_discard_support(struct drbd_device *device,
 		 * topology on all peers. */
 		blk_queue_discard_granularity(q, 512);
 		q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 		q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
 	} else {
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		blk_queue_discard_granularity(q, 0);
 		q->limits.max_discard_sectors = 0;
 		q->limits.max_write_zeroes_sectors = 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 9d29aa6413e5..7952ed5c607b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -214,10 +214,10 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
 	blk_mq_freeze_queue(lo->lo_queue);
 	lo->use_dio = use_dio;
 	if (use_dio) {
-		queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
 	} else {
-		queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
 	}
 	blk_mq_unfreeze_queue(lo->lo_queue);
@@ -817,7 +817,7 @@ static void loop_config_discard(struct loop_device *lo)
 		q->limits.discard_alignment = 0;
 		blk_queue_max_discard_sectors(q, 0);
 		blk_queue_max_write_zeroes_sectors(q, 0);
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		return;
 	}
@@ -826,7 +826,7 @@ static void loop_config_discard(struct loop_device *lo)
 
 	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
 	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 
 static void loop_unprepare_queue(struct loop_device *lo)
@@ -1808,7 +1808,7 @@ static int loop_add(struct loop_device **l, int i)
 	 * page. For directio mode, merge does help to dispatch bigger request
 	 * to underlayer disk. We will enable merge once directio is enabled.
 	 */
-	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 
 	err = -ENOMEM;
 	disk = lo->lo_disk = alloc_disk(1 << part_shift);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 86258b00a1d4..afbc202ca6fd 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -964,7 +964,7 @@ static void nbd_parse_flags(struct nbd_device *nbd)
 	else
 		set_disk_ro(nbd->disk, false);
 	if (config->flags & NBD_FLAG_SEND_TRIM)
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 	if (config->flags & NBD_FLAG_SEND_FLUSH) {
 		if (config->flags & NBD_FLAG_SEND_FUA)
 			blk_queue_write_cache(nbd->disk->queue, true, true);
@@ -1040,7 +1040,7 @@ static void nbd_config_put(struct nbd_device *nbd)
 		nbd->config = NULL;
 
 		nbd->tag_set.timeout = 0;
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
 		mutex_unlock(&nbd->config_lock);
 		nbd_put(nbd);
@@ -1488,8 +1488,8 @@ static int nbd_dev_add(int index)
 	/*
 	 * Tell the block layer that we are not a rotational device
 	 */
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
 	disk->queue->limits.discard_granularity = 512;
 	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
 	blk_queue_max_segment_size(disk->queue, UINT_MAX);
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index d6be7a6d8ca6..0517613afccb 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1525,7 +1525,7 @@ static void null_config_discard(struct nullb *nullb)
 	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
 	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
 	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
 }
 
 static int null_open(struct block_device *bdev, fmode_t mode)
@@ -1810,8 +1810,8 @@ static int null_add_dev(struct nullb_device *dev)
 	}
 
 	nullb->q->queuedata = nullb;
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 
 	mutex_lock(&lock);
 	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8e40da093766..0016170cde0a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4370,7 +4370,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 		goto out_tag_set;
 	}
 
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
 
 	/* set io sizes to object size */
@@ -4383,7 +4383,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	blk_queue_io_opt(q, segment_size);
 
 	/* enable the discard support */
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	q->limits.discard_granularity = segment_size;
 	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
 	blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index e397d3ee7308..dddb3f2490b6 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -287,10 +287,10 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue);
 	if (rsxx_discard_supported(card)) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue);
 		blk_queue_max_discard_sectors(card->queue,
 						RSXX_HW_BLK_SIZE >> 9);
 		card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index e41935ab41ef..bc7aea6d7b7c 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2858,8 +2858,8 @@ static int skd_cons_disk(struct skd_device *skdev)
 	/* set optimal I/O size to 8KB */
 	blk_queue_io_opt(q, 8192);
 
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 
 	blk_queue_rq_timeout(q, 8 * HZ);
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index e126e4cac2ca..3fcdc0d8eed3 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -931,15 +931,15 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
 	unsigned int segments = info->max_indirect_segments ? :
 				BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
-	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+	blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
 
 	if (info->feature_discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
 		blk_queue_max_discard_sectors(rq, get_capacity(gd));
 		rq->limits.discard_granularity = info->discard_granularity;
 		rq->limits.discard_alignment = info->discard_alignment;
 		if (info->feature_secdiscard)
-			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+			blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
 	}
 
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -1610,8 +1610,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				blkif_req(req)->error = BLK_STS_NOTSUPP;
 				info->feature_discard = 0;
 				info->feature_secdiscard = 0;
-				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
-				queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
+				blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+				blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
 			}
 			break;
 		case BLKIF_OP_FLUSH_DISKCACHE:
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 85110e7931e5..71b449613cfa 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1530,8 +1530,8 @@ static int zram_add(void)
 	/* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
 	set_capacity(zram->disk, 0);
 	/* zram devices sort of resembles non-rotational disks */
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
 
 	/*
 	 * To ensure that we always get PAGE_SIZE aligned
@@ -1544,7 +1544,7 @@ static int zram_add(void)
 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);
 
 	/*
 	 * zram_bio_discard() will clear all logical blocks if logical block
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 188d1b03715d..9c47f975567f 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -687,8 +687,8 @@ static void ide_disk_setup(ide_drive_t *drive)
 			 queue_max_sectors(q) / 2);
 
 	if (ata_id_is_ssd(id)) {
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 	}
 
 	/* calculate drive capacity, and select LBA if possible */
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index d6b8c7e1545d..2019e66eada7 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -773,7 +773,7 @@ static int ide_init_queue(ide_drive_t *drive)
 	q->request_fn = do_ide_request;
 	q->initialize_rq_fn = ide_initialize_rq;
 	q->cmd_size = sizeof(struct ide_request);
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	if (blk_init_allocated_queue(q) < 0) {
 		blk_cleanup_queue(q);
 		return 1;
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 93d671ca518e..5b46924ac66c 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -1067,7 +1067,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
 	tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
 	tqueue->limits.discard_alignment = 0;
 	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);
 
 	pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
 			tdisk->disk_name,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5fe7ec356c33..54c39ad4ef01 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1861,7 +1861,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	q->limits = *limits;
 
 	if (!dm_table_supports_discards(t)) {
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		/* Must also clear discard limits... */
 		q->limits.max_discard_sectors = 0;
 		q->limits.max_hw_discard_sectors = 0;
@@ -1869,7 +1869,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		q->limits.discard_alignment = 0;
 		q->limits.discard_misaligned = 0;
 	} else
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 
 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
 		wc = true;
@@ -1879,15 +1879,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	blk_queue_write_cache(q, wc, fua);
 
 	if (dm_table_supports_dax(t))
-		queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
+		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 	if (dm_table_supports_dax_write_cache(t))
 		dax_write_cache(t->md->dax_dev, true);
 
 	/* Ensure that all underlying devices are non-rotational. */
 	if (dm_table_all_devices_attribute(t, device_is_nonrot))
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	else
-		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
 
 	if (!dm_table_supports_write_same(t))
 		q->limits.max_write_same_sectors = 0;
@@ -1895,9 +1895,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		q->limits.max_write_zeroes_sectors = 0;
 
 	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
-		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+		blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
 	else
-		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+		blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
 
 	dm_table_verify_integrity(t);
 
@@ -1908,7 +1908,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * have it set.
 	 */
 	if (blk_queue_add_random(q) &&
	    dm_table_all_devices_attribute(t, device_is_not_random))
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 773fc70dced7..4964323d936b 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -138,9 +138,9 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
 	}
 
 	if (!discard_supported)
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
 	else
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 
 	/*
 	 * Here we calculate the device offsets.
diff --git a/drivers/md/md.c b/drivers/md/md.c
index eba7fa2f0abb..de2b26fba5d8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5608,9 +5608,9 @@ int md_run(struct mddev *mddev)
 		if (mddev->degraded)
 			nonrot = false;
 		if (nonrot)
-			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
 		mddev->queue->backing_dev_info->congested_data = mddev;
 		mddev->queue->backing_dev_info->congested_fn = md_congested;
 	}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 5ecba9eef441..584c10347267 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -399,9 +399,9 @@ static int raid0_run(struct mddev *mddev)
 				discard_supported = true;
 		}
 		if (!discard_supported)
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
 		else
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	}
 
 	/* calculate array device size */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index b2eae332e1a2..f1635eb9e95a 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1760,7 +1760,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		}
 	}
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	print_conf(conf);
 	return err;
 }
@@ -3099,10 +3099,10 @@ static int raid1_run(struct mddev *mddev)
 
 	if (mddev->queue) {
 		if (discard_supported)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
 						  mddev->queue);
 	}
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 99c9207899a7..e9c409c5f344 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1845,7 +1845,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		break;
 	}
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 
 	print_conf(conf);
 	return err;
@@ -3844,10 +3844,10 @@ static int raid10_run(struct mddev *mddev)
 
 	if (mddev->queue) {
 		if (discard_supported)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
 						  mddev->queue);
 	}
 	/* need to check that every block has at least one working mirror */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 50d01144b805..14714b23a2fa 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7444,10 +7444,10 @@ static int raid5_run(struct mddev *mddev)
 		if (devices_handle_discard_safely &&
 		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
 		    mddev->queue->limits.discard_granularity >= stripe)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
 						  mddev->queue);
 
 		blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 421fab7250ac..56e9a803db21 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -185,14 +185,14 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 	if (!max_discard)
 		return;
 
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	blk_queue_max_discard_sectors(q, max_discard);
 	q->limits.discard_granularity = card->pref_erase << 9;
 	/* granularity must not be greater than max. discard */
 	if (card->pref_erase > max_discard)
 		q->limits.discard_granularity = 0;
 	if (mmc_can_secure_erase_trim(card))
-		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
+		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
 }
 
 /**
@@ -356,8 +356,8 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 9ec8f033ac5f..16ae4ae8e8f9 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -419,11 +419,11 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 	blk_queue_logical_block_size(new->rq, tr->blksize);
 
 	blk_queue_bounce_limit(new->rq, BLK_BOUNCE_HIGH);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
 
 	if (tr->discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
 		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
 	}
 
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 345acca576b3..7bde764f939a 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -266,7 +266,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
 	blk_queue_make_request(q, nd_blk_make_request);
 	blk_queue_max_hw_sectors(q, UINT_MAX);
 	blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	q->queuedata = nsblk;
 
 	disk = alloc_disk(0);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 2ef544f10ec8..6f311f88a8e8 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1542,7 +1542,7 @@ static int btt_blk_init(struct btt *btt)
 	blk_queue_make_request(btt->btt_queue, btt_make_request);
 	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
 	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue);
 	btt->btt_queue->queuedata = btt;
 
 	set_capacity(btt->btt_disk, 0);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index cfb15ac50925..145db2ad712f 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -388,8 +388,8 @@ static int pmem_attach_disk(struct device *dev,
 	blk_queue_physical_block_size(q, PAGE_SIZE);
 	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
 	blk_queue_max_hw_sectors(q, UINT_MAX);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 	q->queuedata = pmem;
 
 	disk = alloc_disk_node(0, nid);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 817e5e2766da..72e241923e7d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1358,7 +1358,7 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl,
 
 	blk_queue_max_discard_sectors(queue, UINT_MAX);
 	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, queue);
 
 	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
@@ -2949,7 +2949,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	ns->queue = blk_mq_init_queue(ctrl->tagset);
 	if (IS_ERR(ns->queue))
 		goto out_free_ns;
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
 	ns->queue->queuedata = ns;
 	ns->ctrl = ctrl;
 
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 88440562a197..7283d7149baf 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -168,7 +168,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	q->queuedata = head;
 	blk_queue_make_request(q, nvme_ns_head_make_request);
 	q->poll_fn = nvme_ns_head_poll;
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	/* set to a default value for 512 until disk is validated */
 	blk_queue_logical_block_size(q, 512);
 
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index a7c15f0085e2..7be803afcb43 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3210,7 +3210,7 @@ static void dasd_setup_queue(struct dasd_block *block)
 	} else {
 		max = block->base->discipline->max_blocks << block->s2b_shift;
 	}
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	q->limits.max_dev_sectors = max;
 	blk_queue_logical_block_size(q, logical_block_size);
 	blk_queue_max_hw_sectors(q, max);
@@ -3233,7 +3233,7 @@ static void dasd_setup_queue(struct dasd_block *block)
 
 	blk_queue_max_discard_sectors(q, max_discard_sectors);
 	blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 9cae08b36b80..0a312e450207 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -633,7 +633,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 	dev_info->gd->private_data = dev_info;
 	blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
 	blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
-	queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
+	blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
 
 	seg_byte_size = (dev_info->end - dev_info->start + 1);
 	set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index b4130c7880d8..b1fcb76dd272 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -472,8 +472,8 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 	blk_queue_logical_block_size(rq, 1 << 12);
 	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
 	blk_queue_max_segments(rq, nr_max_blk);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
 
 	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
 	if (!bdev->gendisk) {
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 2a6334ca750e..3df5d68d09f0 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -348,8 +348,8 @@ static int __init xpram_setup_blkdev(void)
 			put_disk(xpram_disks[i]);
 			goto out;
 		}
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
 		blk_queue_make_request(xpram_queues[i], xpram_make_request);
 		blk_queue_logical_block_size(xpram_queues[i], 4096);
 	}
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index a71ee67df084..dc234650014c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1864,7 +1864,7 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
 
 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
 
-	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
+	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
 }
 
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 073ced07e662..298019cf08a2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1908,7 +1908,7 @@ megasas_is_prp_possible(struct megasas_instance *instance,
 	 * then sending IOs with holes.
 	 *
	 * Though driver can request block layer to disable IO merging by calling-
-	 * queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
+	 * blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
 	 * user may tune sysfs parameter- nomerges again to 0 or 1.
 	 *
 	 * If in future IO scheduling is enabled with SCSI BLK MQ,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 74fca184dba9..e3843828e59a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2352,7 +2352,7 @@ scsih_slave_configure(struct scsi_device *sdev)
 		 ** merged and can eliminate holes created during merging
 		 ** operation.
 		 **/
-		queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES,
+		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
 		    sdev->request_queue);
 		blk_queue_virt_boundary(sdev->request_queue,
 		    ioc->page_size - 1);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index a5986dae9020..1cb353f18d08 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -3897,7 +3897,7 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp)
 	if (sdebug_verbose)
 		pr_info("slave_alloc <%u %u %u %llu>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
-	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
+	blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue);
 	return 0;
 }
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 71d1135f94d0..538152f3528e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2140,7 +2140,7 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 {
 	struct device *dev = shost->dma_dev;
 
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 
 	/*
 	 * this limit is imposed by hardware restrictions
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 736a1f4f9676..7c0987616684 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -227,8 +227,8 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	return 0;
 }
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bff21e636ddd..98de3207ac5d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -714,7 +714,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 	case SD_LBP_FULL:
 	case SD_LBP_DISABLE:
 		blk_queue_max_discard_sectors(q, 0);
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		return;
 
 	case SD_LBP_UNMAP:
@@ -747,7 +747,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 	}
 
 	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 
 static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
@@ -2952,8 +2952,8 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 	rot = get_unaligned_be16(&buffer[4]);
 
 	if (rot == 1) {
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 	}
 
 	if (sdkp->device->type == TYPE_ZBC) {