Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c   | 87
-rw-r--r--  block/blk-core.c     | 13
-rw-r--r--  block/blk-mq.c       | 20
-rw-r--r--  block/blk-throttle.c |  5
-rw-r--r--  block/t10-pi.c       |  2
5 files changed, 89 insertions, 38 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4b1a35ab0ea4..37e6cc91d576 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -322,6 +322,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 	blkg->q = disk->queue;
 	INIT_LIST_HEAD(&blkg->q_node);
 	blkg->blkcg = blkcg;
+	blkg->iostat.blkg = blkg;
 #ifdef CONFIG_BLK_CGROUP_PUNT_BIO
 	spin_lock_init(&blkg->async_bio_lock);
 	bio_list_init(&blkg->async_bios);
@@ -618,12 +619,45 @@ restart:
 	spin_unlock_irq(&q->queue_lock);
 }
 
+static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
+{
+	int i;
+
+	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
+		dst->bytes[i] = src->bytes[i];
+		dst->ios[i] = src->ios[i];
+	}
+}
+
+static void __blkg_clear_stat(struct blkg_iostat_set *bis)
+{
+	struct blkg_iostat cur = {0};
+	unsigned long flags;
+
+	flags = u64_stats_update_begin_irqsave(&bis->sync);
+	blkg_iostat_set(&bis->cur, &cur);
+	blkg_iostat_set(&bis->last, &cur);
+	u64_stats_update_end_irqrestore(&bis->sync, flags);
+}
+
+static void blkg_clear_stat(struct blkcg_gq *blkg)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu);
+
+		__blkg_clear_stat(s);
+	}
+	__blkg_clear_stat(&blkg->iostat);
+}
+
 static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 			     struct cftype *cftype, u64 val)
 {
 	struct blkcg *blkcg = css_to_blkcg(css);
 	struct blkcg_gq *blkg;
-	int i, cpu;
+	int i;
 
 	mutex_lock(&blkcg_pol_mutex);
 	spin_lock_irq(&blkcg->lock);
@@ -634,18 +668,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 	 * anyway. If you get hit by a race, retry.
 	 */
 	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
-		for_each_possible_cpu(cpu) {
-			struct blkg_iostat_set *bis =
-				per_cpu_ptr(blkg->iostat_cpu, cpu);
-			memset(bis, 0, sizeof(*bis));
-
-			/* Re-initialize the cleared blkg_iostat_set */
-			u64_stats_init(&bis->sync);
-			bis->blkg = blkg;
-		}
-		memset(&blkg->iostat, 0, sizeof(blkg->iostat));
-		u64_stats_init(&blkg->iostat.sync);
-
+		blkg_clear_stat(blkg);
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkcg_policy *pol = blkcg_policy[i];
 
@@ -948,16 +971,6 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
 }
 EXPORT_SYMBOL_GPL(blkg_conf_exit);
 
-static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
-{
-	int i;
-
-	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
-		dst->bytes[i] = src->bytes[i];
-		dst->ios[i] = src->ios[i];
-	}
-}
-
 static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
 {
 	int i;
@@ -1023,7 +1036,19 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
 		struct blkg_iostat cur;
 		unsigned int seq;
 
+		/*
+		 * Order assignment of `next_bisc` from `bisc->lnode.next` in
+		 * llist_for_each_entry_safe and clearing `bisc->lqueued` for
+		 * avoiding to assign `next_bisc` with new next pointer added
+		 * in blk_cgroup_bio_start() in case of re-ordering.
+		 *
+		 * The pair barrier is implied in llist_add() in blk_cgroup_bio_start().
+		 */
+		smp_mb();
+
 		WRITE_ONCE(bisc->lqueued, false);
+		if (bisc == &blkg->iostat)
+			goto propagate_up; /* propagate up to parent only */
 
 		/* fetch the current per-cpu values */
 		do {
@@ -1033,10 +1058,24 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
 
 		blkcg_iostat_update(blkg, &cur, &bisc->last);
 
+propagate_up:
 		/* propagate global delta to parent (unless that's root) */
-		if (parent && parent->parent)
+		if (parent && parent->parent) {
 			blkcg_iostat_update(parent, &blkg->iostat.cur,
 					    &blkg->iostat.last);
+			/*
+			 * Queue parent->iostat to its blkcg's lockless
+			 * list to propagate up to the grandparent if the
+			 * iostat hasn't been queued yet.
+			 */
+			if (!parent->iostat.lqueued) {
+				struct llist_head *plhead;
+
+				plhead = per_cpu_ptr(parent->blkcg->lhead, cpu);
+				llist_add(&parent->iostat.lnode, plhead);
+				parent->iostat.lqueued = true;
+			}
+		}
 	}
 	raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
 out:
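
Note: the blk-cgroup hunks above make a flush push each group's delta since its last flush into its parent and then queue the parent on the same lockless list, so a later flush pass carries the delta one more level up toward the root. Below is a minimal userspace C sketch of that scheme; it drops the per-cpu split, the u64_stats seqcount, the memory barriers and the root-skip, and every name in it is invented for illustration (this is not kernel code):

/*
 * Toy model of the hierarchical stat flush: each node keeps a running
 * counter (cur) and the portion already pushed to its parent (last).
 * A flush drains the queued nodes, adds each delta to the parent, and
 * queues the parent so the next flush moves the delta one level up.
 */
#include <stdio.h>
#include <stdbool.h>

struct node {
	const char *name;
	struct node *parent;
	unsigned long long cur;   /* running total for this node */
	unsigned long long last;  /* portion already propagated to parent */
	bool queued;              /* already on the flush list? */
};

#define MAX_QUEUED 16

static struct node *flush_list[MAX_QUEUED];
static int nr_queued;

static void queue_node(struct node *n)
{
	if (!n->queued && nr_queued < MAX_QUEUED) {
		flush_list[nr_queued++] = n;
		n->queued = true;
	}
}

static void flush(void)
{
	struct node *pending[MAX_QUEUED];
	int i, n = nr_queued;

	/* detach the whole list first, so parents queued below start a fresh one */
	for (i = 0; i < n; i++)
		pending[i] = flush_list[i];
	nr_queued = 0;

	for (i = 0; i < n; i++) {
		struct node *node = pending[i];
		struct node *parent = node->parent;
		unsigned long long delta = node->cur - node->last;

		node->queued = false;
		node->last = node->cur;
		if (parent) {
			parent->cur += delta;
			queue_node(parent);   /* carry the delta further up */
		}
	}
}

int main(void)
{
	struct node root = { .name = "root" };
	struct node mid  = { .name = "mid",  .parent = &root };
	struct node leaf = { .name = "leaf", .parent = &mid  };

	/* account some I/O in the leaf and queue it, as bio completion would */
	leaf.cur += 4096;
	queue_node(&leaf);

	flush();   /* leaf -> mid */
	flush();   /* mid -> root */

	printf("leaf=%llu mid=%llu root=%llu\n", leaf.cur, mid.cur, root.cur);
	return 0;
}

Run back to back, the two flushes print 4096 for all three levels, which is the effect the re-queueing of parent->iostat is after: a delta reaches the root after enough flush passes without any level being walked more than once per pass.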
diff --git a/block/blk-core.c b/block/blk-core.c
index ea44b13af9af..82c3ae22d76d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -615,9 +615,14 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
 
 static void __submit_bio(struct bio *bio)
 {
+	/* If plug is not used, add new plug here to cache nsecs time. */
+	struct blk_plug plug;
+
 	if (unlikely(!blk_crypto_bio_prep(&bio)))
 		return;
 
+	blk_start_plug(&plug);
+
 	if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
 		blk_mq_submit_bio(bio);
 	} else if (likely(bio_queue_enter(bio) == 0)) {
@@ -626,6 +631,8 @@ static void __submit_bio(struct bio *bio)
 		disk->fops->submit_bio(bio);
 		blk_queue_exit(disk->queue);
 	}
+
+	blk_finish_plug(&plug);
 }
 
 /*
@@ -650,13 +657,11 @@ static void __submit_bio(struct bio *bio)
 static void __submit_bio_noacct(struct bio *bio)
 {
 	struct bio_list bio_list_on_stack[2];
-	struct blk_plug plug;
 
 	BUG_ON(bio->bi_next);
 
 	bio_list_init(&bio_list_on_stack[0]);
 	current->bio_list = bio_list_on_stack;
-	blk_start_plug(&plug);
 
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
@@ -690,23 +695,19 @@ static void __submit_bio_noacct(struct bio *bio)
 		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
 	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));
 
-	blk_finish_plug(&plug);
 	current->bio_list = NULL;
 }
 
 static void __submit_bio_noacct_mq(struct bio *bio)
 {
 	struct bio_list bio_list[2] = { };
-	struct blk_plug plug;
 
 	current->bio_list = bio_list;
-	blk_start_plug(&plug);
 
 	do {
 		__submit_bio(bio);
 	} while ((bio = bio_list_pop(&bio_list[0])));
 
-	blk_finish_plug(&plug);
 	current->bio_list = NULL;
 }
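
Note: the blk-core hunks above move the plug out of __submit_bio_noacct() and __submit_bio_noacct_mq() and into __submit_bio(), so a plug is active around every submission even when the caller did not start one (per the added comment, to cache the nanosecond timestamp). The caller-side pattern is unchanged; a kernel-style sketch of it, illustrative only and not taken from this diff:

#include <linux/blkdev.h>
#include <linux/bio.h>

/*
 * Illustrative only: submit a batch of already-prepared bios under one
 * plug so they can be merged and dispatched together when the plug is
 * flushed.  Bio preparation and error handling are omitted.
 */
static void submit_bio_batch(struct bio **bios, unsigned int nr)
{
	struct blk_plug plug;
	unsigned int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);
}

Nesting is harmless here: blk_start_plug() is a no-op when the task already has an active plug, so the plug added in __submit_bio() does not conflict with a plug started by the caller.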
diff --git a/block/blk-mq.c b/block/blk-mq.c
index fc364a226e95..3b4df8e5ac9e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3545,12 +3545,28 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
 	return 0;
 }
 
+/*
+ * Check if one CPU is mapped to the specified hctx
+ *
+ * Isolated CPUs have been ruled out from hctx->cpumask, which is supposed
+ * to be used for scheduling kworker only. For other usage, please call this
+ * helper for checking if one CPU belongs to the specified hctx
+ */
+static bool blk_mq_cpu_mapped_to_hctx(unsigned int cpu,
+		const struct blk_mq_hw_ctx *hctx)
+{
+	struct blk_mq_hw_ctx *mapped_hctx = blk_mq_map_queue_type(hctx->queue,
+			hctx->type, cpu);
+
+	return mapped_hctx == hctx;
+}
+
 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
 {
 	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
 			struct blk_mq_hw_ctx, cpuhp_online);
 
-	if (cpumask_test_cpu(cpu, hctx->cpumask))
+	if (blk_mq_cpu_mapped_to_hctx(cpu, hctx))
 		clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
 	return 0;
 }
@@ -3568,7 +3584,7 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 	enum hctx_type type;
 
 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
-	if (!cpumask_test_cpu(cpu, hctx->cpumask))
+	if (!blk_mq_cpu_mapped_to_hctx(cpu, hctx))
 		return 0;
 
 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 80aaca18bfb0..0be180f9a789 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -39,11 +39,6 @@ struct latency_bucket {
 	int samples;
 };
 
-struct avg_latency_bucket {
-	unsigned long latency; /* ns / 1024 */
-	bool valid;
-};
-
 struct throtl_data
 {
 	/* service tree for active throtl groups */
diff --git a/block/t10-pi.c b/block/t10-pi.c
index d90892fd6f2a..f4cc91156da1 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -495,5 +495,5 @@ const struct blk_integrity_profile ext_pi_type3_crc64 = {
 };
 EXPORT_SYMBOL_GPL(ext_pi_type3_crc64);
 
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("T10 Protection Information module");
 MODULE_LICENSE("GPL");
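
Note, on the blk-mq hunks above: blk_mq_cpu_mapped_to_hctx() asks the queue-type map which hctx a CPU is actually routed to instead of testing hctx->cpumask, because isolated CPUs are cleared from that mask (it is meant for kworker scheduling only), so a mask test can wrongly conclude during hotplug that a CPU does not belong to the hctx. A small userspace C model of the difference between a mask test and a map lookup; all names are invented for illustration (this is not kernel code):

/*
 * Toy model: CPUs are routed to hardware queues through a map table.
 * A per-queue "service mask" may additionally exclude isolated CPUs,
 * so testing the mask is not the same as asking the map.
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS   4
#define NR_QUEUES 2

static int cpu_to_queue[NR_CPUS] = { 0, 0, 1, 1 };   /* routing map */

static bool service_mask[NR_QUEUES][NR_CPUS] = {
	{ true,  true,  false, false },
	{ false, false, true,  false },   /* CPU 3 isolated: cleared here */
};

/* mask-based check: misses isolated CPUs */
static bool cpu_in_service_mask(int cpu, int q)
{
	return service_mask[q][cpu];
}

/* map-based check: reflects where requests from this CPU actually go */
static bool cpu_mapped_to_queue(int cpu, int q)
{
	return cpu_to_queue[cpu] == q;
}

int main(void)
{
	int cpu = 3, q = 1;

	printf("mask says %d, map says %d\n",
	       cpu_in_service_mask(cpu, q), cpu_mapped_to_queue(cpu, q));
	return 0;
}

For the isolated CPU the mask reports 0 while the map reports 1, which is why the hotplug notifiers in the hunks switch to the map-based helper.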