author | Grant Likely <grant.likely@secretlab.ca> | 2010-05-22 00:36:56 -0600
---|---|---
committer | Grant Likely <grant.likely@secretlab.ca> | 2010-05-22 00:36:56 -0600
commit | cf9b59e9d3e008591d1f54830f570982bb307a0d (patch) |
tree | 113478ce8fd8c832ba726ffdf59b82cb46356476 /drivers/md/raid5.c |
parent | 44504b2bebf8b5823c59484e73096a7d6574471d (diff) |
parent | f4b87dee923342505e1ddba8d34ce9de33e75050 (diff) |
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts and
build failures in vio.c after merge.
Conflicts:
	drivers/i2c/busses/i2c-cpm.c
	drivers/i2c/busses/i2c-mpc.c
	drivers/net/gianfar.c
Also fixed up one line in arch/powerpc/kernel/vio.c to use the
correct node pointer.
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
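The workflow that produces a back-merge like this is not visible on the page itself, so here is a minimal sketch of the usual sequence. It assumes a local branch named next-devicetree (published as secretlab/next-devicetree) and the remote name 'origin' from the subject line; the exact edits made to resolve each conflict are recorded only in the diff below, not here.

    # Illustrative sketch only -- not the exact commands the committer ran.
    git checkout next-devicetree        # assumed local name for secretlab/next-devicetree
    git merge origin                    # merge the current state of Linus' tree

    # git stops on the conflicting files named in the commit message; fix each
    # one in an editor, then stage the result:
    git add drivers/i2c/busses/i2c-cpm.c
    git add drivers/i2c/busses/i2c-mpc.c
    git add drivers/net/gianfar.c

    # the one-line vio.c fixup is an ordinary edit folded into the same merge:
    git add arch/powerpc/kernel/vio.c
    git commit -s                       # -s appends the Signed-off-by trailer

The "Conflicts:" block above is the list git itself places in the default merge message for files that needed manual resolution.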
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r-- | drivers/md/raid5.c | 285
1 file changed, 152 insertions, 133 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e3e9a36ea3b7..9ea17d6c799b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -53,6 +53,7 @@ #include <linux/slab.h> #include "md.h" #include "raid5.h" +#include "raid0.h" #include "bitmap.h" /* @@ -1509,7 +1510,7 @@ static void raid5_end_read_request(struct bio * bi, int error) set_bit(R5_UPTODATE, &sh->dev[i].flags); if (test_bit(R5_ReadError, &sh->dev[i].flags)) { rdev = conf->disks[i].rdev; - printk_rl(KERN_INFO "raid5:%s: read error corrected" + printk_rl(KERN_INFO "md/raid:%s: read error corrected" " (%lu sectors at %llu on %s)\n", mdname(conf->mddev), STRIPE_SECTORS, (unsigned long long)(sh->sector @@ -1527,9 +1528,9 @@ static void raid5_end_read_request(struct bio * bi, int error) clear_bit(R5_UPTODATE, &sh->dev[i].flags); atomic_inc(&rdev->read_errors); - if (conf->mddev->degraded) + if (conf->mddev->degraded >= conf->max_degraded) printk_rl(KERN_WARNING - "raid5:%s: read error not correctable " + "md/raid:%s: read error not correctable " "(sector %llu on %s).\n", mdname(conf->mddev), (unsigned long long)(sh->sector @@ -1538,7 +1539,7 @@ static void raid5_end_read_request(struct bio * bi, int error) else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) /* Oh, no!!! */ printk_rl(KERN_WARNING - "raid5:%s: read error NOT corrected!! " + "md/raid:%s: read error NOT corrected!! " "(sector %llu on %s).\n", mdname(conf->mddev), (unsigned long long)(sh->sector @@ -1547,7 +1548,7 @@ static void raid5_end_read_request(struct bio * bi, int error) else if (atomic_read(&rdev->read_errors) > conf->max_nr_stripes) printk(KERN_WARNING - "raid5:%s: Too many read errors, failing device %s.\n", + "md/raid:%s: Too many read errors, failing device %s.\n", mdname(conf->mddev), bdn); else retry = 1; @@ -1619,8 +1620,8 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous) static void error(mddev_t *mddev, mdk_rdev_t *rdev) { char b[BDEVNAME_SIZE]; - raid5_conf_t *conf = (raid5_conf_t *) mddev->private; - pr_debug("raid5: error called\n"); + raid5_conf_t *conf = mddev->private; + pr_debug("raid456: error called\n"); if (!test_bit(Faulty, &rdev->flags)) { set_bit(MD_CHANGE_DEVS, &mddev->flags); @@ -1636,9 +1637,13 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) } set_bit(Faulty, &rdev->flags); printk(KERN_ALERT - "raid5: Disk failure on %s, disabling device.\n" - "raid5: Operation continuing on %d devices.\n", - bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); + "md/raid:%s: Disk failure on %s, disabling device.\n" + KERN_ALERT + "md/raid:%s: Operation continuing on %d devices.\n", + mdname(mddev), + bdevname(rdev->bdev, b), + mdname(mddev), + conf->raid_disks - mddev->degraded); } } @@ -1650,8 +1655,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, int previous, int *dd_idx, struct stripe_head *sh) { - long stripe; - unsigned long chunk_number; + sector_t stripe, stripe2; + sector_t chunk_number; unsigned int chunk_offset; int pd_idx, qd_idx; int ddf_layout = 0; @@ -1671,18 +1676,13 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, */ chunk_offset = sector_div(r_sector, sectors_per_chunk); chunk_number = r_sector; - BUG_ON(r_sector != chunk_number); /* * Compute the stripe number */ - stripe = chunk_number / data_disks; - - /* - * Compute the data disk and parity disk indexes inside the stripe - */ - *dd_idx = chunk_number % data_disks; - + stripe = chunk_number; + *dd_idx = sector_div(stripe, data_disks); + stripe2 = stripe; /* * Select the 
parity disk based on the user selected algorithm. */ @@ -1694,21 +1694,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, case 5: switch (algorithm) { case ALGORITHM_LEFT_ASYMMETRIC: - pd_idx = data_disks - stripe % raid_disks; + pd_idx = data_disks - sector_div(stripe2, raid_disks); if (*dd_idx >= pd_idx) (*dd_idx)++; break; case ALGORITHM_RIGHT_ASYMMETRIC: - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); if (*dd_idx >= pd_idx) (*dd_idx)++; break; case ALGORITHM_LEFT_SYMMETRIC: - pd_idx = data_disks - stripe % raid_disks; + pd_idx = data_disks - sector_div(stripe2, raid_disks); *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; break; case ALGORITHM_RIGHT_SYMMETRIC: - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; break; case ALGORITHM_PARITY_0: @@ -1719,8 +1719,6 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, pd_idx = data_disks; break; default: - printk(KERN_ERR "raid5: unsupported algorithm %d\n", - algorithm); BUG(); } break; @@ -1728,7 +1726,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, switch (algorithm) { case ALGORITHM_LEFT_ASYMMETRIC: - pd_idx = raid_disks - 1 - (stripe % raid_disks); + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); qd_idx = pd_idx + 1; if (pd_idx == raid_disks-1) { (*dd_idx)++; /* Q D D D P */ @@ -1737,7 +1735,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, (*dd_idx) += 2; /* D D P Q D */ break; case ALGORITHM_RIGHT_ASYMMETRIC: - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); qd_idx = pd_idx + 1; if (pd_idx == raid_disks-1) { (*dd_idx)++; /* Q D D D P */ @@ -1746,12 +1744,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, (*dd_idx) += 2; /* D D P Q D */ break; case ALGORITHM_LEFT_SYMMETRIC: - pd_idx = raid_disks - 1 - (stripe % raid_disks); + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); qd_idx = (pd_idx + 1) % raid_disks; *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; break; case ALGORITHM_RIGHT_SYMMETRIC: - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); qd_idx = (pd_idx + 1) % raid_disks; *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; break; @@ -1770,7 +1768,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, /* Exactly the same as RIGHT_ASYMMETRIC, but or * of blocks for computing Q is different. 
*/ - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); qd_idx = pd_idx + 1; if (pd_idx == raid_disks-1) { (*dd_idx)++; /* Q D D D P */ @@ -1785,7 +1783,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, * D D D P Q rather than * Q D D D P */ - pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); + stripe2 += 1; + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); qd_idx = pd_idx + 1; if (pd_idx == raid_disks-1) { (*dd_idx)++; /* Q D D D P */ @@ -1797,7 +1796,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, case ALGORITHM_ROTATING_N_CONTINUE: /* Same as left_symmetric but Q is before P */ - pd_idx = raid_disks - 1 - (stripe % raid_disks); + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); qd_idx = (pd_idx + raid_disks - 1) % raid_disks; *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; ddf_layout = 1; @@ -1805,27 +1804,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, case ALGORITHM_LEFT_ASYMMETRIC_6: /* RAID5 left_asymmetric, with Q on last device */ - pd_idx = data_disks - stripe % (raid_disks-1); + pd_idx = data_disks - sector_div(stripe2, raid_disks-1); if (*dd_idx >= pd_idx) (*dd_idx)++; qd_idx = raid_disks - 1; break; case ALGORITHM_RIGHT_ASYMMETRIC_6: - pd_idx = stripe % (raid_disks-1); + pd_idx = sector_div(stripe2, raid_disks-1); if (*dd_idx >= pd_idx) (*dd_idx)++; qd_idx = raid_disks - 1; break; case ALGORITHM_LEFT_SYMMETRIC_6: - pd_idx = data_disks - stripe % (raid_disks-1); + pd_idx = data_disks - sector_div(stripe2, raid_disks-1); *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); qd_idx = raid_disks - 1; break; case ALGORITHM_RIGHT_SYMMETRIC_6: - pd_idx = stripe % (raid_disks-1); + pd_idx = sector_div(stripe2, raid_disks-1); *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); qd_idx = raid_disks - 1; break; @@ -1836,10 +1835,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, qd_idx = raid_disks - 1; break; - default: - printk(KERN_CRIT "raid6: unsupported algorithm %d\n", - algorithm); BUG(); } break; @@ -1870,14 +1866,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) : conf->algorithm; sector_t stripe; int chunk_offset; - int chunk_number, dummy1, dd_idx = i; + sector_t chunk_number; + int dummy1, dd_idx = i; sector_t r_sector; struct stripe_head sh2; chunk_offset = sector_div(new_sector, sectors_per_chunk); stripe = new_sector; - BUG_ON(new_sector != stripe); if (i == sh->pd_idx) return 0; @@ -1902,8 +1898,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) case ALGORITHM_PARITY_N: break; default: - printk(KERN_ERR "raid5: unsupported algorithm %d\n", - algorithm); BUG(); } break; @@ -1962,21 +1956,20 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) i -= 1; break; default: - printk(KERN_CRIT "raid6: unsupported algorithm %d\n", - algorithm); BUG(); } break; } chunk_number = stripe * data_disks + i; - r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; + r_sector = chunk_number * sectors_per_chunk + chunk_offset; check = raid5_compute_sector(conf, r_sector, previous, &dummy1, &sh2); if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx || sh2.qd_idx != sh->qd_idx) { - printk(KERN_ERR "compute_blocknr: map not correct\n"); + printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n", + mdname(conf->mddev)); return 0; } return r_sector; @@ -3713,10 +3706,10 @@ static void 
raid5_align_endio(struct bio *bi, int error) bio_put(bi); - mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; - conf = mddev->private; rdev = (void*)raid_bi->bi_next; raid_bi->bi_next = NULL; + mddev = rdev->mddev; + conf = mddev->private; rdev_dec_pending(rdev, conf->mddev); @@ -3753,9 +3746,8 @@ static int bio_fits_rdev(struct bio *bi) } -static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) +static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio) { - mddev_t *mddev = q->queuedata; raid5_conf_t *conf = mddev->private; int dd_idx; struct bio* align_bi; @@ -3870,16 +3862,15 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) return sh; } -static int make_request(struct request_queue *q, struct bio * bi) +static int make_request(mddev_t *mddev, struct bio * bi) { - mddev_t *mddev = q->queuedata; raid5_conf_t *conf = mddev->private; int dd_idx; sector_t new_sector; sector_t logical_sector, last_sector; struct stripe_head *sh; const int rw = bio_data_dir(bi); - int cpu, remaining; + int remaining; if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) { /* Drain all pending writes. We only really need @@ -3894,15 +3885,9 @@ static int make_request(struct request_queue *q, struct bio * bi) md_write_start(mddev, bi); - cpu = part_stat_lock(); - part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); - part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], - bio_sectors(bi)); - part_stat_unlock(); - if (rw == READ && mddev->reshape_position == MaxSector && - chunk_aligned_read(q,bi)) + chunk_aligned_read(mddev,bi)) return 0; logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); @@ -3950,7 +3935,7 @@ static int make_request(struct request_queue *q, struct bio * bi) new_sector = raid5_compute_sector(conf, logical_sector, previous, &dd_idx, NULL); - pr_debug("raid5: make_request, sector %llu logical %llu\n", + pr_debug("raid456: make_request, sector %llu logical %llu\n", (unsigned long long)new_sector, (unsigned long long)logical_sector); @@ -4058,7 +4043,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped * As the reads complete, handle_stripe will copy the data * into the destination stripe and release that stripe. 
*/ - raid5_conf_t *conf = (raid5_conf_t *) mddev->private; + raid5_conf_t *conf = mddev->private; struct stripe_head *sh; sector_t first_sector, last_sector; int raid_disks = conf->previous_raid_disks; @@ -4267,7 +4252,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped /* FIXME go_faster isn't used */ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) { - raid5_conf_t *conf = (raid5_conf_t *) mddev->private; + raid5_conf_t *conf = mddev->private; struct stripe_head *sh; sector_t max_sector = mddev->dev_sectors; int sync_blocks; @@ -4729,7 +4714,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) if (mddev->new_level != 5 && mddev->new_level != 4 && mddev->new_level != 6) { - printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", + printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n", mdname(mddev), mddev->new_level); return ERR_PTR(-EIO); } @@ -4737,12 +4722,12 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) && !algorithm_valid_raid5(mddev->new_layout)) || (mddev->new_level == 6 && !algorithm_valid_raid6(mddev->new_layout))) { - printk(KERN_ERR "raid5: %s: layout %d not supported\n", + printk(KERN_ERR "md/raid:%s: layout %d not supported\n", mdname(mddev), mddev->new_layout); return ERR_PTR(-EIO); } if (mddev->new_level == 6 && mddev->raid_disks < 4) { - printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", + printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n", mdname(mddev), mddev->raid_disks); return ERR_PTR(-EINVAL); } @@ -4750,8 +4735,8 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) if (!mddev->new_chunk_sectors || (mddev->new_chunk_sectors << 9) % PAGE_SIZE || !is_power_of_2(mddev->new_chunk_sectors)) { - printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", - mddev->new_chunk_sectors << 9, mdname(mddev)); + printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n", + mdname(mddev), mddev->new_chunk_sectors << 9); return ERR_PTR(-EINVAL); } @@ -4793,7 +4778,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) if (raid5_alloc_percpu(conf) != 0) goto abort; - pr_debug("raid5: run(%s) called.\n", mdname(mddev)); + pr_debug("raid456: run(%s) called.\n", mdname(mddev)); list_for_each_entry(rdev, &mddev->disks, same_set) { raid_disk = rdev->raid_disk; @@ -4806,9 +4791,9 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) if (test_bit(In_sync, &rdev->flags)) { char b[BDEVNAME_SIZE]; - printk(KERN_INFO "raid5: device %s operational as raid" - " disk %d\n", bdevname(rdev->bdev,b), - raid_disk); + printk(KERN_INFO "md/raid:%s: device %s operational as raid" + " disk %d\n", + mdname(mddev), bdevname(rdev->bdev, b), raid_disk); } else /* Cannot rely on bitmap to complete recovery */ conf->fullsync = 1; @@ -4832,16 +4817,17 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; if (grow_stripes(conf, conf->max_nr_stripes)) { printk(KERN_ERR - "raid5: couldn't allocate %dkB for buffers\n", memory); + "md/raid:%s: couldn't allocate %dkB for buffers\n", + mdname(mddev), memory); goto abort; } else - printk(KERN_INFO "raid5: allocated %dkB for %s\n", - memory, mdname(mddev)); + printk(KERN_INFO "md/raid:%s: allocated %dkB\n", + mdname(mddev), memory); conf->thread = md_register_thread(raid5d, mddev, NULL); if (!conf->thread) { printk(KERN_ERR - "raid5: couldn't allocate thread for %s\n", + "md/raid:%s: couldn't allocate thread.\n", mdname(mddev)); goto abort; } @@ -4892,7 +4878,7 @@ 
static int run(mddev_t *mddev) sector_t reshape_offset = 0; if (mddev->recovery_cp != MaxSector) - printk(KERN_NOTICE "raid5: %s is not clean" + printk(KERN_NOTICE "md/raid:%s: not clean" " -- starting background reconstruction\n", mdname(mddev)); if (mddev->reshape_position != MaxSector) { @@ -4906,7 +4892,7 @@ static int run(mddev_t *mddev) int max_degraded = (mddev->level == 6 ? 2 : 1); if (mddev->new_level != mddev->level) { - printk(KERN_ERR "raid5: %s: unsupported reshape " + printk(KERN_ERR "md/raid:%s: unsupported reshape " "required - aborting.\n", mdname(mddev)); return -EINVAL; @@ -4919,8 +4905,8 @@ static int run(mddev_t *mddev) here_new = mddev->reshape_position; if (sector_div(here_new, mddev->new_chunk_sectors * (mddev->raid_disks - max_degraded))) { - printk(KERN_ERR "raid5: reshape_position not " - "on a stripe boundary\n"); + printk(KERN_ERR "md/raid:%s: reshape_position not " + "on a stripe boundary\n", mdname(mddev)); return -EINVAL; } reshape_offset = here_new * mddev->new_chunk_sectors; @@ -4941,8 +4927,9 @@ static int run(mddev_t *mddev) if ((here_new * mddev->new_chunk_sectors != here_old * mddev->chunk_sectors) || mddev->ro == 0) { - printk(KERN_ERR "raid5: in-place reshape must be started" - " in read-only mode - aborting\n"); + printk(KERN_ERR "md/raid:%s: in-place reshape must be started" + " in read-only mode - aborting\n", + mdname(mddev)); return -EINVAL; } } else if (mddev->delta_disks < 0 @@ -4951,11 +4938,13 @@ static int run(mddev_t *mddev) : (here_new * mddev->new_chunk_sectors >= here_old * mddev->chunk_sectors)) { /* Reading from the same stripe as writing to - bad */ - printk(KERN_ERR "raid5: reshape_position too early for " - "auto-recovery - aborting.\n"); + printk(KERN_ERR "md/raid:%s: reshape_position too early for " + "auto-recovery - aborting.\n", + mdname(mddev)); return -EINVAL; } - printk(KERN_INFO "raid5: reshape will continue\n"); + printk(KERN_INFO "md/raid:%s: reshape will continue\n", + mdname(mddev)); /* OK, we should be able to continue; */ } else { BUG_ON(mddev->level != mddev->new_level); @@ -4997,18 +4986,6 @@ static int run(mddev_t *mddev) mddev->minor_version > 90) rdev->recovery_offset = reshape_offset; - printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n", - rdev->raid_disk, working_disks, conf->prev_algo, - conf->previous_raid_disks, conf->max_degraded, - conf->algorithm, conf->raid_disks, - only_parity(rdev->raid_disk, - conf->prev_algo, - conf->previous_raid_disks, - conf->max_degraded), - only_parity(rdev->raid_disk, - conf->algorithm, - conf->raid_disks, - conf->max_degraded)); if (rdev->recovery_offset < reshape_offset) { /* We need to check old and new layout */ if (!only_parity(rdev->raid_disk, @@ -5029,7 +5006,7 @@ static int run(mddev_t *mddev) - working_disks); if (mddev->degraded > conf->max_degraded) { - printk(KERN_ERR "raid5: not enough operational devices for %s" + printk(KERN_ERR "md/raid:%s: not enough operational devices" " (%d/%d failed)\n", mdname(mddev), mddev->degraded, conf->raid_disks); goto abort; @@ -5043,32 +5020,32 @@ static int run(mddev_t *mddev) mddev->recovery_cp != MaxSector) { if (mddev->ok_start_degraded) printk(KERN_WARNING - "raid5: starting dirty degraded array: %s" - "- data corruption possible.\n", + "md/raid:%s: starting dirty degraded array" + " - data corruption possible.\n", mdname(mddev)); else { printk(KERN_ERR - "raid5: cannot start dirty degraded array for %s\n", + "md/raid:%s: cannot start dirty degraded array.\n", mdname(mddev)); goto abort; } } if (mddev->degraded == 
0) - printk("raid5: raid level %d set %s active with %d out of %d" - " devices, algorithm %d\n", conf->level, mdname(mddev), + printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d" + " devices, algorithm %d\n", mdname(mddev), conf->level, mddev->raid_disks-mddev->degraded, mddev->raid_disks, mddev->new_layout); else - printk(KERN_ALERT "raid5: raid level %d set %s active with %d" - " out of %d devices, algorithm %d\n", conf->level, - mdname(mddev), mddev->raid_disks - mddev->degraded, - mddev->raid_disks, mddev->new_layout); + printk(KERN_ALERT "md/raid:%s: raid level %d active with %d" + " out of %d devices, algorithm %d\n", + mdname(mddev), conf->level, + mddev->raid_disks - mddev->degraded, + mddev->raid_disks, mddev->new_layout); print_raid5_conf(conf); if (conf->reshape_progress != MaxSector) { - printk("...ok start reshape thread\n"); conf->reshape_safe = conf->reshape_progress; atomic_set(&conf->reshape_stripes, 0); clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); @@ -5091,9 +5068,11 @@ static int run(mddev_t *mddev) } /* Ok, everything is just fine now */ - if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) + if (mddev->to_remove == &raid5_attrs_group) + mddev->to_remove = NULL; + else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) printk(KERN_WARNING - "raid5: failed to create sysfs attributes for %s\n", + "md/raid:%s: failed to create sysfs attributes.\n", mdname(mddev)); mddev->queue->queue_lock = &conf->device_lock; @@ -5123,22 +5102,21 @@ abort: free_conf(conf); } mddev->private = NULL; - printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); + printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); return -EIO; } - - static int stop(mddev_t *mddev) { - raid5_conf_t *conf = (raid5_conf_t *) mddev->private; + raid5_conf_t *conf = mddev->private; md_unregister_thread(mddev->thread); mddev->thread = NULL; mddev->queue->backing_dev_info.congested_fn = NULL; blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ free_conf(conf); - mddev->private = &raid5_attrs_group; + mddev->private = NULL; + mddev->to_remove = &raid5_attrs_group; return 0; } @@ -5179,7 +5157,7 @@ static void printall(struct seq_file *seq, raid5_conf_t *conf) static void status(struct seq_file *seq, mddev_t *mddev) { - raid5_conf_t *conf = (raid5_conf_t *) mddev->private; + raid5_conf_t *conf = mddev->private; int i; seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, @@ -5201,21 +5179,22 @@ static void print_raid5_conf (raid5_conf_t *conf) int i; struct disk_info *tmp; - printk("RAID5 conf printout:\n"); + printk(KERN_DEBUG "RAID conf printout:\n"); if (!conf) { printk("(conf==NULL)\n"); return; } - printk(" --- rd:%d wd:%d\n", conf->raid_disks, - conf->raid_disks - conf->mddev->degraded); + printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level, + conf->raid_disks, + conf->raid_disks - conf->mddev->degraded); for (i = 0; i < conf->raid_disks; i++) { char b[BDEVNAME_SIZE]; tmp = conf->disks + i; if (tmp->rdev) - printk(" disk %d, o:%d, dev:%s\n", - i, !test_bit(Faulty, &tmp->rdev->flags), - bdevname(tmp->rdev->bdev,b)); + printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n", + i, !test_bit(Faulty, &tmp->rdev->flags), + bdevname(tmp->rdev->bdev, b)); } } @@ -5338,7 +5317,6 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) raid5_size(mddev, sectors, mddev->raid_disks)) return -EINVAL; set_capacity(mddev->gendisk, mddev->array_sectors); - mddev->changed = 1; revalidate_disk(mddev->gendisk); if (sectors > 
mddev->dev_sectors && mddev->recovery_cp == MaxSector) { mddev->recovery_cp = mddev->dev_sectors; @@ -5364,7 +5342,8 @@ static int check_stripe_cache(mddev_t *mddev) > conf->max_nr_stripes || ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { - printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", + printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n", + mdname(mddev), ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) / STRIPE_SIZE)*4); return 0; @@ -5435,7 +5414,7 @@ static int raid5_start_reshape(mddev_t *mddev) */ if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) < mddev->array_sectors) { - printk(KERN_ERR "md: %s: array size must be reduced " + printk(KERN_ERR "md/raid:%s: array size must be reduced " "before number of disks\n", mdname(mddev)); return -EINVAL; } @@ -5473,9 +5452,9 @@ static int raid5_start_reshape(mddev_t *mddev) if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) printk(KERN_WARNING - "raid5: failed to create " - " link %s for %s\n", - nm, mdname(mddev)); + "md/raid:%s: failed to create " + " link %s\n", + mdname(mddev), nm); } else break; } @@ -5552,7 +5531,6 @@ static void raid5_finish_reshape(mddev_t *mddev) if (mddev->delta_disks > 0) { md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); set_capacity(mddev->gendisk, mddev->array_sectors); - mddev->changed = 1; revalidate_disk(mddev->gendisk); } else { int d; @@ -5617,6 +5595,29 @@ static void raid5_quiesce(mddev_t *mddev, int state) } +static void *raid45_takeover_raid0(mddev_t *mddev, int level) +{ + struct raid0_private_data *raid0_priv = mddev->private; + + /* for raid0 takeover only one zone is supported */ + if (raid0_priv->nr_strip_zones > 1) { + printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n", + mdname(mddev)); + return ERR_PTR(-EINVAL); + } + + mddev->new_level = level; + mddev->new_layout = ALGORITHM_PARITY_N; + mddev->new_chunk_sectors = mddev->chunk_sectors; + mddev->raid_disks += 1; + mddev->delta_disks = 1; + /* make sure it will be not marked as dirty */ + mddev->recovery_cp = MaxSector; + + return setup_conf(mddev); +} + + static void *raid5_takeover_raid1(mddev_t *mddev) { int chunksect; @@ -5741,12 +5742,13 @@ static int raid6_check_reshape(mddev_t *mddev) static void *raid5_takeover(mddev_t *mddev) { /* raid5 can take over: - * raid0 - if all devices are the same - make it a raid4 layout + * raid0 - if there is only one strip zone - make it a raid4 layout * raid1 - if there are two drives. We need to know the chunk size * raid4 - trivial - just use a raid4 layout. 
* raid6 - Providing it is a *_6 layout */ - + if (mddev->level == 0) + return raid45_takeover_raid0(mddev, 5); if (mddev->level == 1) return raid5_takeover_raid1(mddev); if (mddev->level == 4) { @@ -5760,6 +5762,22 @@ static void *raid5_takeover(mddev_t *mddev) return ERR_PTR(-EINVAL); } +static void *raid4_takeover(mddev_t *mddev) +{ + /* raid4 can take over: + * raid0 - if there is only one strip zone + * raid5 - if layout is right + */ + if (mddev->level == 0) + return raid45_takeover_raid0(mddev, 4); + if (mddev->level == 5 && + mddev->layout == ALGORITHM_PARITY_N) { + mddev->new_layout = 0; + mddev->new_level = 4; + return setup_conf(mddev); + } + return ERR_PTR(-EINVAL); +} static struct mdk_personality raid5_personality; @@ -5875,6 +5893,7 @@ static struct mdk_personality raid4_personality = .start_reshape = raid5_start_reshape, .finish_reshape = raid5_finish_reshape, .quiesce = raid5_quiesce, + .takeover = raid4_takeover, }; static int __init raid5_init(void) |