Diffstat (limited to 'drivers/md/raid5-cache.c')
-rw-r--r--  drivers/md/raid5-cache.c  362
1 file changed, 285 insertions(+), 77 deletions(-)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 218b6f37da85..26ba09282e7c 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -30,6 +30,7 @@
* underneath hardware sector size. only works with PAGE_SIZE == 4096
*/
#define BLOCK_SECTORS (8)
+#define BLOCK_SECTOR_SHIFT (3)
/*
* log->max_free_space is min(1/4 disk size, 10G reclaimable space).
@@ -43,7 +44,7 @@
/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start flush with these full stripes */
-#define R5C_FULL_STRIPE_FLUSH_BATCH 256
+#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)
@@ -297,8 +298,7 @@ static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
}
static void
-r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev,
- struct bio_list *return_bi)
+r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
{
struct bio *wbi, *wbi2;
@@ -307,24 +307,21 @@ r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev,
while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
wbi2 = r5_next_bio(wbi, dev->sector);
- if (!raid5_dec_bi_active_stripes(wbi)) {
- md_write_end(conf->mddev);
- bio_list_add(return_bi, wbi);
- }
+ md_write_end(conf->mddev);
+ bio_endio(wbi);
wbi = wbi2;
}
}
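
Note: with raid5_dec_bi_active_stripes() gone, each cached write bio is
finished on the spot instead of being queued on a return_bi list for the
caller to end later. A minimal sketch of the new completion step
(hypothetical helper name, for illustration only):

    /* end one write immediately: md_write_end() drops the array's
     * pending-write count before the bio is completed */
    static void complete_cached_write(struct mddev *mddev, struct bio *wbi)
    {
            md_write_end(mddev);
            bio_endio(wbi);
    }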
void r5c_handle_cached_data_endio(struct r5conf *conf,
- struct stripe_head *sh, int disks, struct bio_list *return_bi)
+ struct stripe_head *sh, int disks)
{
int i;
for (i = sh->disks; i--; ) {
if (sh->dev[i].written) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
- r5c_return_dev_pending_writes(conf, &sh->dev[i],
- return_bi);
+ r5c_return_dev_pending_writes(conf, &sh->dev[i]);
bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state),
@@ -333,6 +330,8 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
}
}
+void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+
/* Check whether we should flush some stripes to free up stripe cache */
void r5c_check_stripe_cache_usage(struct r5conf *conf)
{
@@ -371,7 +370,7 @@ void r5c_check_cached_full_stripe(struct r5conf *conf)
* or a full stripe (chunk size / 4k stripes).
*/
if (atomic_read(&conf->r5c_cached_full_stripes) >=
- min(R5C_FULL_STRIPE_FLUSH_BATCH,
+ min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
conf->chunk_sectors >> STRIPE_SHIFT))
r5l_wake_reclaim(conf->log, 0);
}
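
Note: scaling the batch to conf->max_nr_stripes lets small stripe caches
reach the wakeup threshold; the old fixed 256 could exceed the entire
cache. Worked example with assumed defaults (256-stripe cache, 512KiB
chunk = 1024 sectors, STRIPE_SHIFT == 3 for 4K stripes):

    int batch     = 256 / 4;    /* R5C_FULL_STRIPE_FLUSH_BATCH(conf) == 64 */
    int per_chunk = 1024 >> 3;  /* chunk_sectors >> STRIPE_SHIFT == 128 */
    int wakeup    = batch < per_chunk ? batch : per_chunk;  /* 64 stripes */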
@@ -580,7 +579,7 @@ static void r5l_log_endio(struct bio *bio)
spin_lock_irqsave(&log->io_list_lock, flags);
__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
- if (log->need_cache_flush)
+ if (log->need_cache_flush && !list_empty(&io->stripe_list))
r5l_move_to_end_ios(log);
else
r5l_log_run_stripes(log);
@@ -608,9 +607,11 @@ static void r5l_log_endio(struct bio *bio)
bio_endio(bi);
atomic_dec(&io->pending_stripe);
}
- if (atomic_read(&io->pending_stripe) == 0)
- __r5l_stripe_write_finished(io);
}
+
+ /* finish flush-only io_unit and PAYLOAD_FLUSH-only io_unit */
+ if (atomic_read(&io->pending_stripe) == 0)
+ __r5l_stripe_write_finished(io);
}
static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
@@ -832,6 +833,41 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
r5_reserve_log_entry(log, io);
}
+static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ struct r5l_io_unit *io;
+ struct r5l_payload_flush *payload;
+ int meta_size;
+
+ /*
+ * payload_flush requires extra writes to the journal.
+ * To avoid handling the extra IO in quiesce, just skip
+ * flush_payload
+ */
+ if (conf->quiesce)
+ return;
+
+ mutex_lock(&log->io_mutex);
+ meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);
+
+ if (r5l_get_meta(log, meta_size)) {
+ mutex_unlock(&log->io_mutex);
+ return;
+ }
+
+ /* current implementation is one stripe per flush payload */
+ io = log->current_io;
+ payload = page_address(io->meta_page) + io->meta_offset;
+ payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
+ payload->header.flags = cpu_to_le16(0);
+ payload->size = cpu_to_le32(sizeof(__le64));
+ payload->flush_stripes[0] = cpu_to_le64(sect);
+ io->meta_offset += meta_size;
+ mutex_unlock(&log->io_mutex);
+}
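
Note: for reference, the on-disk flush payload written here is the packed
structure below, as declared in include/uapi/linux/raid/md_p.h at the time
of this change (verify against your tree). With one stripe per payload,
meta_size is sizeof(struct r5l_payload_flush) + sizeof(__le64) =
8 + 8 = 16 bytes.

    struct r5l_payload_header {
            __le16 type;                /* R5LOG_PAYLOAD_FLUSH here */
            __le16 flags;
    } __attribute__ ((__packed__));

    struct r5l_payload_flush {
            struct r5l_payload_header header;
            __le32 size;                /* bytes used by flush_stripes[] */
            __le64 flush_stripes[];     /* sector of each flushed stripe */
    } __attribute__ ((__packed__));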
+
static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
int data_pages, int parity_pages)
{
@@ -1383,7 +1419,7 @@ static void r5c_do_reclaim(struct r5conf *conf)
stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
- R5C_FULL_STRIPE_FLUSH_BATCH)
+ R5C_FULL_STRIPE_FLUSH_BATCH(conf))
/*
* if stripe cache pressure is moderate, or if there are many full
* stripes, flush all full stripes
@@ -1542,6 +1578,8 @@ bool r5l_log_disk_error(struct r5conf *conf)
return ret;
}
+#define R5L_RECOVERY_PAGE_POOL_SIZE 256
+
struct r5l_recovery_ctx {
struct page *meta_page; /* current meta */
sector_t meta_total_blocks; /* total size of current meta and data */
@@ -1550,18 +1588,131 @@ struct r5l_recovery_ctx {
int data_parity_stripes; /* number of data_parity stripes */
int data_only_stripes; /* number of data_only stripes */
struct list_head cached_list;
+
+ /*
+ * read ahead page pool (ra_pool)
+ * In recovery, the log is read sequentially. It is not efficient to
+ * read every page with sync_page_io(). The read ahead page pool
+ * reads multiple pages with one IO, so further log reads can
+ * just copy data from the pool.
+ */
+ struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
+ sector_t pool_offset; /* offset of first page in the pool */
+ int total_pages; /* total allocated pages */
+ int valid_pages; /* pages with valid data */
+ struct bio *ra_bio; /* bio to do the read ahead */
};
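
Note: pool geometry for the PAGE_SIZE == 4096 case this file already
requires: a full pool is 256 pages, so one refill reads 1MiB of journal in
a single bio instead of issuing 256 sync_page_io() round trips. A sketch
of the arithmetic:

    /* coverage of one pool fill, assuming 4K pages (BLOCK_SECTORS == 8) */
    sector_t covered = R5L_RECOVERY_PAGE_POOL_SIZE * BLOCK_SECTORS; /* 2048 sectors */
    size_t bytes     = covered << 9;                                /* 1 MiB */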
+static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct page *page;
+
+ ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, log->bs);
+ if (!ctx->ra_bio)
+ return -ENOMEM;
+
+ ctx->valid_pages = 0;
+ ctx->total_pages = 0;
+ while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
+ page = alloc_page(GFP_KERNEL);
+
+ if (!page)
+ break;
+ ctx->ra_pool[ctx->total_pages] = page;
+ ctx->total_pages += 1;
+ }
+
+ if (ctx->total_pages == 0) {
+ bio_put(ctx->ra_bio);
+ return -ENOMEM;
+ }
+
+ ctx->pool_offset = 0;
+ return 0;
+}
+
+static void r5l_recovery_free_ra_pool(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < ctx->total_pages; ++i)
+ put_page(ctx->ra_pool[i]);
+ bio_put(ctx->ra_bio);
+}
+
+/*
+ * fetch up to ctx->total_pages pages starting at offset
+ * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
+ * However, if the offset is close to the end of the journal device,
+ * ctx->valid_pages could be smaller than ctx->total_pages.
+ */
+static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx,
+ sector_t offset)
+{
+ bio_reset(ctx->ra_bio);
+ ctx->ra_bio->bi_bdev = log->rdev->bdev;
+ bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
+ ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
+
+ ctx->valid_pages = 0;
+ ctx->pool_offset = offset;
+
+ while (ctx->valid_pages < ctx->total_pages) {
+ bio_add_page(ctx->ra_bio,
+ ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
+ ctx->valid_pages += 1;
+
+ offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
+
+ if (offset == 0) /* reached end of the device */
+ break;
+ }
+
+ return submit_bio_wait(ctx->ra_bio);
+}
+
+/*
+ * try to read a page from the read ahead page pool; if the page is not
+ * in the pool, call r5l_recovery_fetch_ra_pool to refill it
+ */
+static int r5l_recovery_read_page(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx,
+ struct page *page,
+ sector_t offset)
+{
+ int ret;
+
+ if (offset < ctx->pool_offset ||
+ offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
+ ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
+ if (ret)
+ return ret;
+ }
+
+ BUG_ON(offset < ctx->pool_offset ||
+ offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);
+
+ memcpy(page_address(page),
+ page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
+ BLOCK_SECTOR_SHIFT]),
+ PAGE_SIZE);
+ return 0;
+}
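
Note: worked example of the hit test and index math, with hypothetical
sector numbers. Each 4K page spans BLOCK_SECTORS (8) sectors, so the pool
slot holding a block is the sector delta shifted right by
BLOCK_SECTOR_SHIFT (3):

    sector_t pool_offset = 1024;  /* first pooled page starts here */
    int valid_pages      = 256;   /* pool covers sectors 1024..3071 */
    sector_t offset      = 1040;  /* requested block */

    /* hit: 1024 <= 1040 < 1024 + 256 * 8, so no refill is needed */
    int idx = (offset - pool_offset) >> BLOCK_SECTOR_SHIFT; /* 16 >> 3 == 2 */
    /* sectors 1040..1047 are copied out of ra_pool[2] */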
+
static int r5l_recovery_read_meta_block(struct r5l_log *log,
struct r5l_recovery_ctx *ctx)
{
struct page *page = ctx->meta_page;
struct r5l_meta_block *mb;
u32 crc, stored_crc;
+ int ret;
- if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_READ, 0,
- false))
- return -EIO;
+ ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
+ if (ret != 0)
+ return ret;
mb = page_address(page);
stored_crc = le32_to_cpu(mb->checksum);
@@ -1643,8 +1794,7 @@ static void r5l_recovery_load_data(struct r5l_log *log,
raid5_compute_sector(conf,
le64_to_cpu(payload->location), 0,
&dd_idx, sh);
- sync_page_io(log->rdev, log_offset, PAGE_SIZE,
- sh->dev[dd_idx].page, REQ_OP_READ, 0, false);
+ r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
sh->dev[dd_idx].log_checksum =
le32_to_cpu(payload->checksum[0]);
ctx->meta_total_blocks += BLOCK_SECTORS;
@@ -1663,17 +1813,15 @@ static void r5l_recovery_load_parity(struct r5l_log *log,
struct r5conf *conf = mddev->private;
ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
- sync_page_io(log->rdev, log_offset, PAGE_SIZE,
- sh->dev[sh->pd_idx].page, REQ_OP_READ, 0, false);
+ r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
sh->dev[sh->pd_idx].log_checksum =
le32_to_cpu(payload->checksum[0]);
set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
if (sh->qd_idx >= 0) {
- sync_page_io(log->rdev,
- r5l_ring_add(log, log_offset, BLOCK_SECTORS),
- PAGE_SIZE, sh->dev[sh->qd_idx].page,
- REQ_OP_READ, 0, false);
+ r5l_recovery_read_page(
+ log, ctx, sh->dev[sh->qd_idx].page,
+ r5l_ring_add(log, log_offset, BLOCK_SECTORS));
sh->dev[sh->qd_idx].log_checksum =
le32_to_cpu(payload->checksum[1]);
set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
@@ -1804,14 +1952,15 @@ r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
/* returns 0 if the checksum matches; otherwise -EINVAL */
static int
-r5l_recovery_verify_data_checksum(struct r5l_log *log, struct page *page,
+r5l_recovery_verify_data_checksum(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx,
+ struct page *page,
sector_t log_offset, __le32 log_checksum)
{
void *addr;
u32 checksum;
- sync_page_io(log->rdev, log_offset, PAGE_SIZE,
- page, REQ_OP_READ, 0, false);
+ r5l_recovery_read_page(log, ctx, page, log_offset);
addr = kmap_atomic(page);
checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
kunmap_atomic(addr);
@@ -1833,6 +1982,7 @@ r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
struct page *page;
struct r5l_payload_data_parity *payload;
+ struct r5l_payload_flush *payload_flush;
page = alloc_page(GFP_KERNEL);
if (!page)
@@ -1840,33 +1990,42 @@ r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
while (mb_offset < le32_to_cpu(mb->meta_size)) {
payload = (void *)mb + mb_offset;
+ payload_flush = (void *)mb + mb_offset;
- if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+ if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
if (r5l_recovery_verify_data_checksum(
- log, page, log_offset,
+ log, ctx, page, log_offset,
payload->checksum[0]) < 0)
goto mismatch;
- } else if (payload->header.type == R5LOG_PAYLOAD_PARITY) {
+ } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
if (r5l_recovery_verify_data_checksum(
- log, page, log_offset,
+ log, ctx, page, log_offset,
payload->checksum[0]) < 0)
goto mismatch;
if (conf->max_degraded == 2 && /* q for RAID 6 */
r5l_recovery_verify_data_checksum(
- log, page,
+ log, ctx, page,
r5l_ring_add(log, log_offset,
BLOCK_SECTORS),
payload->checksum[1]) < 0)
goto mismatch;
- } else /* not R5LOG_PAYLOAD_DATA or R5LOG_PAYLOAD_PARITY */
+ } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
+ /* nothing to do for R5LOG_PAYLOAD_FLUSH here */
+ } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
goto mismatch;
- log_offset = r5l_ring_add(log, log_offset,
- le32_to_cpu(payload->size));
+ if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
+ mb_offset += sizeof(struct r5l_payload_flush) +
+ le32_to_cpu(payload_flush->size);
+ } else {
+ /* DATA or PARITY payload */
+ log_offset = r5l_ring_add(log, log_offset,
+ le32_to_cpu(payload->size));
+ mb_offset += sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
+ }
- mb_offset += sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) *
- (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
}
put_page(page);
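
Note: the two advance rules differ because a flush payload lives entirely
in the meta block, while data/parity payloads also consume log blocks.
Worked numbers for one 4K data page (payload->size == 8 sectors), assuming
PAGE_SHIFT == 12:

    /* checksum slots per data/parity payload: sectors to pages */
    int slots = 8 >> (PAGE_SHIFT - 9);              /* 8 >> 3 == 1 */

    /* meta bytes consumed:
     *   data/parity: sizeof(struct r5l_payload_data_parity)
     *                + 1 * sizeof(__le32), and log_offset advances
     *   flush:       sizeof(struct r5l_payload_flush) + size bytes of
     *                __le64 sectors; log_offset does not advance,
     *                because no data blocks follow in the log
     */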
@@ -1894,6 +2053,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
struct r5conf *conf = mddev->private;
struct r5l_meta_block *mb;
struct r5l_payload_data_parity *payload;
+ struct r5l_payload_flush *payload_flush;
int mb_offset;
sector_t log_offset;
sector_t stripe_sect;
@@ -1919,7 +2079,31 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
int dd;
payload = (void *)mb + mb_offset;
- stripe_sect = (payload->header.type == R5LOG_PAYLOAD_DATA) ?
+ payload_flush = (void *)mb + mb_offset;
+
+ if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
+ int i, count;
+
+ count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
+ for (i = 0; i < count; ++i) {
+ stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
+ sh = r5c_recovery_lookup_stripe(cached_stripe_list,
+ stripe_sect);
+ if (sh) {
+ WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+ r5l_recovery_reset_stripe(sh);
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
+ }
+ }
+
+ mb_offset += sizeof(struct r5l_payload_flush) +
+ le32_to_cpu(payload_flush->size);
+ continue;
+ }
+
+ /* DATA or PARITY payload */
+ stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
raid5_compute_sector(
conf, le64_to_cpu(payload->location), 0, &dd,
NULL)
@@ -1957,7 +2141,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
list_add_tail(&sh->lru, cached_stripe_list);
}
- if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+ if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
r5l_recovery_replay_one_stripe(conf, sh, ctx);
@@ -1965,7 +2149,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
}
r5l_recovery_load_data(log, sh, ctx, payload,
log_offset);
- } else if (payload->header.type == R5LOG_PAYLOAD_PARITY)
+ } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
r5l_recovery_load_parity(log, sh, ctx, payload,
log_offset);
else
@@ -2167,7 +2351,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
payload = (void *)mb + offset;
payload->header.type = cpu_to_le16(
R5LOG_PAYLOAD_DATA);
- payload->size = BLOCK_SECTORS;
+ payload->size = cpu_to_le32(BLOCK_SECTORS);
payload->location = cpu_to_le64(
raid5_compute_blocknr(sh, i, 0));
addr = kmap_atomic(dev->page);
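
Note: the cpu_to_le32() fix above repairs a raw assignment to an on-disk
little-endian field, which would have stored a native-endian value on
big-endian hosts. When the field is declared __le32, sparse (make C=1) can
flag this class of bug. A minimal sketch with a hypothetical struct:

    struct on_disk {
            __le32 size;                 /* little-endian on disk */
    };

    static void fill(struct on_disk *d)
    {
            d->size = cpu_to_le32(8);    /* correct on any host */
            /* d->size = 8; -- sparse: incorrect type in assignment */
    }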
@@ -2231,55 +2415,70 @@ static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
static int r5l_recovery_log(struct r5l_log *log)
{
struct mddev *mddev = log->rdev->mddev;
- struct r5l_recovery_ctx ctx;
+ struct r5l_recovery_ctx *ctx;
int ret;
sector_t pos;
- ctx.pos = log->last_checkpoint;
- ctx.seq = log->last_cp_seq;
- ctx.meta_page = alloc_page(GFP_KERNEL);
- ctx.data_only_stripes = 0;
- ctx.data_parity_stripes = 0;
- INIT_LIST_HEAD(&ctx.cached_list);
-
- if (!ctx.meta_page)
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
return -ENOMEM;
- ret = r5c_recovery_flush_log(log, &ctx);
- __free_page(ctx.meta_page);
+ ctx->pos = log->last_checkpoint;
+ ctx->seq = log->last_cp_seq;
+ INIT_LIST_HEAD(&ctx->cached_list);
+ ctx->meta_page = alloc_page(GFP_KERNEL);
- if (ret)
- return ret;
+ if (!ctx->meta_page) {
+ ret = -ENOMEM;
+ goto meta_page;
+ }
- pos = ctx.pos;
- ctx.seq += 10000;
+ if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
+ ret = -ENOMEM;
+ goto ra_pool;
+ }
+ ret = r5c_recovery_flush_log(log, ctx);
- if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
+ if (ret)
+ goto error;
+
+ pos = ctx->pos;
+ ctx->seq += 10000;
+
+ if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
pr_debug("md/raid:%s: starting from clean shutdown\n",
mdname(mddev));
else
pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
- mdname(mddev), ctx.data_only_stripes,
- ctx.data_parity_stripes);
-
- if (ctx.data_only_stripes == 0) {
- log->next_checkpoint = ctx.pos;
- r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
- ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
- } else if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
+ mdname(mddev), ctx->data_only_stripes,
+ ctx->data_parity_stripes);
+
+ if (ctx->data_only_stripes == 0) {
+ log->next_checkpoint = ctx->pos;
+ r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
+ ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+ } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
mdname(mddev));
- return -EIO;
+ ret = -EIO;
+ goto error;
}
- log->log_start = ctx.pos;
- log->seq = ctx.seq;
+ log->log_start = ctx->pos;
+ log->seq = ctx->seq;
log->last_checkpoint = pos;
r5l_write_super(log, pos);
- r5c_recovery_flush_data_only_stripes(log, &ctx);
- return 0;
+ r5c_recovery_flush_data_only_stripes(log, ctx);
+ ret = 0;
+error:
+ r5l_recovery_free_ra_pool(log, ctx);
+ra_pool:
+ __free_page(ctx->meta_page);
+meta_page:
+ kfree(ctx);
+ return ret;
}
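
Note: the recovery context now lives on the heap (its 256-pointer ra_pool
alone is 2KiB on 64-bit, too large for the stack) and errors unwind
through the usual kernel goto ladder, releasing resources in reverse order
of acquisition; success and failure share the same teardown. The shape of
the idiom, as a generic sketch with hypothetical names:

    static int example_setup(void)
    {
            void *a, *b;
            int ret;

            a = kzalloc(A_SIZE, GFP_KERNEL);
            if (!a)
                    return -ENOMEM;
            b = kzalloc(B_SIZE, GFP_KERNEL);
            if (!b) {
                    ret = -ENOMEM;
                    goto out_a;          /* free only what exists so far */
            }
            ret = do_work(a, b);         /* 0 on success */
            kfree(b);                    /* shared unwind, reverse order */
    out_a:
            kfree(a);
            return ret;
    }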
static void r5l_write_super(struct r5l_log *log, sector_t cp)
@@ -2624,11 +2823,11 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
atomic_dec(&conf->r5c_flushing_full_stripes);
atomic_dec(&conf->r5c_cached_full_stripes);
}
+
+ r5l_append_flush_payload(log, sh->sector);
}
-int
-r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
- struct stripe_head_state *s)
+int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
{
struct r5conf *conf = sh->raid_conf;
int pages = 0;
@@ -2791,6 +2990,10 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
struct request_queue *q = bdev_get_queue(rdev->bdev);
struct r5l_log *log;
+ char b[BDEVNAME_SIZE];
+
+ pr_debug("md/raid:%s: using device %s as journal\n",
+ mdname(conf->mddev), bdevname(rdev->bdev, b));
if (PAGE_SIZE != 4096)
return -EINVAL;
@@ -2893,8 +3096,13 @@ io_kc:
return -EINVAL;
}
-void r5l_exit_log(struct r5l_log *log)
+void r5l_exit_log(struct r5conf *conf)
{
+ struct r5l_log *log = conf->log;
+
+ conf->log = NULL;
+ synchronize_rcu();
+
flush_work(&log->disable_writeback_work);
md_unregister_thread(&log->reclaim_thread);
mempool_destroy(log->meta_pool);
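
Note: clearing conf->log before synchronize_rcu() guarantees that any code
path which sampled a non-NULL log pointer inside an RCU read-side section
completes before teardown continues. Assumed reader-side pattern
(illustrative, not the exact raid5 call sites):

    struct r5l_log *log;

    rcu_read_lock();
    log = rcu_dereference(conf->log);
    if (log)
            use_log(log);   /* safe: r5l_exit_log() waits for this section */
    rcu_read_unlock();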