From 9ed38ff53090856769b99e9d2f19740fb11e6956 Mon Sep 17 00:00:00 2001 From: Goldwyn Rodrigues Date: Fri, 14 Aug 2015 12:19:40 -0500 Subject: md-cluster: complete all write requests before adding suspend_info process_suspend_info - which handles the RESYNCING request - must not reply until all writes which were initiated before the request arrived, have completed. As a by-product, all process_* functions now take mddev as their first arguement making it uniform. Signed-off-by: Goldwyn Rodrigues Signed-off-by: NeilBrown --- drivers/md/md-cluster.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 11e3bc9d2a4b..4a965f22be20 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -374,9 +374,10 @@ static void remove_suspend_info(struct md_cluster_info *cinfo, int slot) } -static void process_suspend_info(struct md_cluster_info *cinfo, +static void process_suspend_info(struct mddev *mddev, int slot, sector_t lo, sector_t hi) { + struct md_cluster_info *cinfo = mddev->cluster_info; struct suspend_info *s; if (!hi) { @@ -389,6 +390,8 @@ static void process_suspend_info(struct md_cluster_info *cinfo, s->slot = slot; s->lo = lo; s->hi = hi; + mddev->pers->quiesce(mddev, 1); + mddev->pers->quiesce(mddev, 0); spin_lock_irq(&cinfo->suspend_lock); /* Remove existing entry (if exists) before adding */ __remove_suspend_info(cinfo, slot); @@ -457,7 +460,7 @@ static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg) case RESYNCING: pr_info("%s: %d Received message: RESYNCING from %d\n", __func__, __LINE__, msg->slot); - process_suspend_info(mddev->cluster_info, msg->slot, + process_suspend_info(mddev, msg->slot, msg->low, msg->high); break; case NEWDISK: -- cgit v1.2.3 From 3c462c880b52aae2cfbbb8db8b401eef118cc128 Mon Sep 17 00:00:00 2001 From: Goldwyn Rodrigues Date: Wed, 19 Aug 2015 07:35:54 +1000 Subject: md: Increment version for clustered bitmaps Add BITMAP_MAJOR_CLUSTERED as 5, in order to prevent older kernels to assemble a clustered device. In order to maximize compatibility, the major version is set to BITMAP_MAJOR_CLUSTERED *only* if the bitmap is clustered. Added MD_FEATURE_CLUSTERED in order to return error for older kernels which would assemble MD even if the bitmap is corrupted. Signed-off-by: Goldwyn Rodrigues Signed-off-by: NeilBrown --- drivers/md/bitmap.c | 10 ++++------ drivers/md/bitmap.h | 2 ++ drivers/md/md.c | 3 +++ include/uapi/linux/raid/md_p.h | 2 ++ 4 files changed, 11 insertions(+), 6 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 48b5890c28e3..e9d3ee703e6d 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -613,12 +613,10 @@ re_read: daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; write_behind = le32_to_cpu(sb->write_behind); sectors_reserved = le32_to_cpu(sb->sectors_reserved); - /* XXX: This is a hack to ensure that we don't use clustering - * in case: - * - dm-raid is in use and - * - the nodes written in bitmap_sb is erroneous. 
+ /* Setup nodes/clustername only if bitmap version is + * cluster-compatible */ - if (!bitmap->mddev->sync_super) { + if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) { nodes = le32_to_cpu(sb->nodes); strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64); @@ -628,7 +626,7 @@ re_read: if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) reason = "bad magic"; else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO || - le32_to_cpu(sb->version) > BITMAP_MAJOR_HI) + le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED) reason = "unrecognized superblock version"; else if (chunksize < 512) reason = "bitmap chunksize too small"; diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h index f1f4dd01090d..8731fa06855f 100644 --- a/drivers/md/bitmap.h +++ b/drivers/md/bitmap.h @@ -9,8 +9,10 @@ #define BITMAP_MAJOR_LO 3 /* version 4 insists the bitmap is in little-endian order * with version 3, it is host-endian which is non-portable + * Version 5 is currently set only for clustered devices */ #define BITMAP_MAJOR_HI 4 +#define BITMAP_MAJOR_CLUSTERED 5 #define BITMAP_MAJOR_HOSTENDIAN 3 /* diff --git a/drivers/md/md.c b/drivers/md/md.c index c702de18207a..1e1bdd86f40c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1735,6 +1735,9 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) } } + if (mddev_is_clustered(mddev)) + sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED); + if (rdev->badblocks.count == 0) /* Nothing to do for bad blocks*/ ; else if (sb->bblog_offset == 0) diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index 2ae6131e69a5..867ee874fa80 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -302,6 +302,7 @@ struct mdp_superblock_1 { #define MD_FEATURE_RECOVERY_BITMAP 128 /* recovery that is happening * is guided by bitmap. */ +#define MD_FEATURE_CLUSTERED 256 /* clustered MD */ #define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \ |MD_FEATURE_RECOVERY_OFFSET \ |MD_FEATURE_RESHAPE_ACTIVE \ @@ -310,6 +311,7 @@ struct mdp_superblock_1 { |MD_FEATURE_RESHAPE_BACKWARDS \ |MD_FEATURE_NEW_OFFSET \ |MD_FEATURE_RECOVERY_BITMAP \ + |MD_FEATURE_CLUSTERED \ ) #endif -- cgit v1.2.3 From c40f341f1e7fd4eddcfc5881d94cfa8669071ee6 Mon Sep 17 00:00:00 2001 From: Goldwyn Rodrigues Date: Wed, 19 Aug 2015 08:14:42 +1000 Subject: md-cluster: Use a small window for resync Suspending the entire device for resync could take too long. Resync in small chunks. cluster's resync window (32M) is maintained in r1conf as cluster_sync_low and cluster_sync_high and processed in raid1's sync_request(). If the current resync is outside the cluster resync window: 1. Set the cluster_sync_low to curr_resync_completed. 2. Check if the sync will fit in the new window, if not issue a wait_barrier() and set cluster_sync_low to sector_nr. 3. Set cluster_sync_high to cluster_sync_low + resync_window. 4. Send a message to all nodes so they may add it in their suspension list. bitmap_cond_end_sync is modified to allow to force a sync inorder to get the curr_resync_completed uptodate with the sector passed. 
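In outline, the window handling added to raid1's sync_request() (see the raid1.c hunk below) amounts to the following sketch; the field and macro names are the ones this patch introduces, and this is an illustration rather than the literal applied hunk:

	if (mddev_is_clustered(mddev) &&
	    conf->cluster_sync_high < sector_nr + nr_sectors) {
		/* current window exhausted: slide it forward */
		conf->cluster_sync_low = mddev->curr_resync_completed;
		conf->cluster_sync_high = conf->cluster_sync_low +
					  CLUSTER_RESYNC_WINDOW_SECTORS;
		/* ask the other nodes to suspend writes to [low, high) */
		md_cluster_ops->resync_info_update(mddev,
						   conf->cluster_sync_low,
						   conf->cluster_sync_high);
	}
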
Signed-off-by: Goldwyn Rodrigues Signed-off-by: NeilBrown --- drivers/md/bitmap.c | 4 ++-- drivers/md/bitmap.h | 2 +- drivers/md/md-cluster.c | 41 +++++------------------------------------ drivers/md/md-cluster.h | 4 +--- drivers/md/md.c | 8 -------- drivers/md/raid1.c | 26 +++++++++++++++++++++++++- drivers/md/raid1.h | 7 +++++++ drivers/md/raid10.c | 2 +- drivers/md/raid5.c | 2 +- 9 files changed, 43 insertions(+), 53 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index e9d3ee703e6d..4f22e919787a 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1570,7 +1570,7 @@ void bitmap_close_sync(struct bitmap *bitmap) } EXPORT_SYMBOL(bitmap_close_sync); -void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) +void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force) { sector_t s = 0; sector_t blocks; @@ -1581,7 +1581,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) bitmap->last_end_sync = jiffies; return; } - if (time_before(jiffies, (bitmap->last_end_sync + if (!force && time_before(jiffies, (bitmap->last_end_sync + bitmap->mddev->bitmap_info.daemon_sleep))) return; wait_event(bitmap->mddev->recovery_wait, diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h index 8731fa06855f..7d5c3a610ca5 100644 --- a/drivers/md/bitmap.h +++ b/drivers/md/bitmap.h @@ -257,7 +257,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded); void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted); void bitmap_close_sync(struct bitmap *bitmap); -void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector); +void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force); void bitmap_unplug(struct bitmap *bitmap); void bitmap_daemon_work(struct mddev *mddev); diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 4a965f22be20..b94a2e68ef43 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -802,15 +802,6 @@ static int slot_number(struct mddev *mddev) return cinfo->slot_number - 1; } -static void resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi) -{ - struct md_cluster_info *cinfo = mddev->cluster_info; - - add_resync_info(mddev, cinfo->bitmap_lockres, lo, hi); - /* Re-acquire the lock to refresh LVB */ - dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW); -} - static int metadata_update_start(struct mddev *mddev) { return lock_comm(mddev->cluster_info); @@ -836,45 +827,25 @@ static int metadata_update_cancel(struct mddev *mddev) return dlm_unlock_sync(cinfo->token_lockres); } -static int resync_send(struct mddev *mddev, enum msg_type type, - sector_t lo, sector_t hi) +static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi) { struct md_cluster_info *cinfo = mddev->cluster_info; struct cluster_msg cmsg; int slot = cinfo->slot_number - 1; + add_resync_info(mddev, cinfo->bitmap_lockres, lo, hi); + /* Re-acquire the lock to refresh LVB */ + dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW); pr_info("%s:%d lo: %llu hi: %llu\n", __func__, __LINE__, (unsigned long long)lo, (unsigned long long)hi); - resync_info_update(mddev, lo, hi); - cmsg.type = cpu_to_le32(type); + cmsg.type = cpu_to_le32(RESYNCING); cmsg.slot = cpu_to_le32(slot); cmsg.low = cpu_to_le64(lo); cmsg.high = cpu_to_le64(hi); return sendmsg(cinfo, &cmsg); } -static int resync_start(struct mddev *mddev, sector_t lo, sector_t hi) 
-{ - pr_info("%s:%d\n", __func__, __LINE__); - return resync_send(mddev, RESYNCING, lo, hi); -} - -static void resync_finish(struct mddev *mddev) -{ - struct md_cluster_info *cinfo = mddev->cluster_info; - struct cluster_msg cmsg; - int slot = cinfo->slot_number - 1; - - pr_info("%s:%d\n", __func__, __LINE__); - resync_send(mddev, RESYNCING, 0, 0); - if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { - cmsg.type = cpu_to_le32(BITMAP_NEEDS_SYNC); - cmsg.slot = cpu_to_le32(slot); - sendmsg(cinfo, &cmsg); - } -} - static int area_resyncing(struct mddev *mddev, int direction, sector_t lo, sector_t hi) { @@ -997,8 +968,6 @@ static struct md_cluster_operations cluster_ops = { .leave = leave, .slot_number = slot_number, .resync_info_update = resync_info_update, - .resync_start = resync_start, - .resync_finish = resync_finish, .metadata_update_start = metadata_update_start, .metadata_update_finish = metadata_update_finish, .metadata_update_cancel = metadata_update_cancel, diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h index 00defe2badbc..f5bdc0c86eaa 100644 --- a/drivers/md/md-cluster.h +++ b/drivers/md/md-cluster.h @@ -12,9 +12,7 @@ struct md_cluster_operations { int (*join)(struct mddev *mddev, int nodes); int (*leave)(struct mddev *mddev); int (*slot_number)(struct mddev *mddev); - void (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi); - int (*resync_start)(struct mddev *mddev, sector_t lo, sector_t hi); - void (*resync_finish)(struct mddev *mddev); + int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi); int (*metadata_update_start)(struct mddev *mddev); int (*metadata_update_finish)(struct mddev *mddev); int (*metadata_update_cancel)(struct mddev *mddev); diff --git a/drivers/md/md.c b/drivers/md/md.c index 1e1bdd86f40c..9798a9921a38 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7805,9 +7805,6 @@ void md_do_sync(struct md_thread *thread) md_new_event(mddev); update_time = jiffies; - if (mddev_is_clustered(mddev)) - md_cluster_ops->resync_start(mddev, j, max_sectors); - blk_start_plug(&plug); while (j < max_sectors) { sector_t sectors; @@ -7871,8 +7868,6 @@ void md_do_sync(struct md_thread *thread) j = max_sectors; if (j > 2) mddev->curr_resync = j; - if (mddev_is_clustered(mddev)) - md_cluster_ops->resync_info_update(mddev, j, max_sectors); mddev->curr_mark_cnt = io_sectors; if (last_check == 0) /* this is the earliest that rebuild will be @@ -7979,9 +7974,6 @@ void md_do_sync(struct md_thread *thread) } } skip: - if (mddev_is_clustered(mddev)) - md_cluster_ops->resync_finish(mddev); - set_bit(MD_CHANGE_DEVS, &mddev->flags); spin_lock(&mddev->lock); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 049df6c4a8cc..1dd13bb52940 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -90,6 +90,8 @@ static void r1bio_pool_free(void *r1_bio, void *data) #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH) #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9) +#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW) +#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9) #define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS) static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) @@ -2488,6 +2490,13 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp bitmap_close_sync(mddev->bitmap); close_sync(conf); + + if (mddev_is_clustered(mddev)) { + conf->cluster_sync_low = 0; + conf->cluster_sync_high = 0; + /* Send 
zeros to mark end of resync */ + md_cluster_ops->resync_info_update(mddev, 0, 0); + } return 0; } @@ -2508,7 +2517,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp return sync_blocks; } - bitmap_cond_end_sync(mddev->bitmap, sector_nr); + /* we are incrementing sector_nr below. To be safe, we check against + * sector_nr + two times RESYNC_SECTORS + */ + + bitmap_cond_end_sync(mddev->bitmap, sector_nr, + mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); raise_barrier(conf, sector_nr); @@ -2699,6 +2713,16 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp bio_full: r1_bio->sectors = nr_sectors; + if (mddev_is_clustered(mddev) && + conf->cluster_sync_high < sector_nr + nr_sectors) { + conf->cluster_sync_low = mddev->curr_resync_completed; + conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS; + /* Send resync message */ + md_cluster_ops->resync_info_update(mddev, + conf->cluster_sync_low, + conf->cluster_sync_high); + } + /* For a user-requested sync, we read all readable devices and do a * compare */ diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index c52d7139c5d7..61c39b390cd8 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -111,6 +111,13 @@ struct r1conf { * the new thread here until we fully activate the array. */ struct md_thread *thread; + + /* Keep track of cluster resync window to send to other + * nodes. + */ + sector_t cluster_sync_low; + sector_t cluster_sync_high; + }; /* diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 7c99a4037715..5f30b7526c1f 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3137,7 +3137,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, /* resync. Schedule a read for every block at this virt offset */ int count = 0; - bitmap_cond_end_sync(mddev->bitmap, sector_nr); + bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0); if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, mddev->degraded) && diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 49bb8d3ff9be..5b79770c4f08 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5613,7 +5613,7 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ } - bitmap_cond_end_sync(mddev->bitmap, sector_nr); + bitmap_cond_end_sync(mddev->bitmap, sector_nr, false); sh = get_active_stripe(conf, sector_nr, 0, 1, 0); if (sh == NULL) { -- cgit v1.2.3 From 099954119d53a24573d22b70a6ea22a0a279e689 Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Thu, 1 Oct 2015 00:09:18 +0800 Subject: md-cluster: send BITMAP_NEEDS_SYNC when node is leaving cluster Previously, BITMAP_NEEDS_SYNC message is sent when the resyc aborts, but it could abort for different reasons, and not all of reasons require another node to take over the resync ownship. It is better make BITMAP_NEEDS_SYNC message only be sent when the node is leaving cluster with dirty bitmap. And we also need to ensure dlm connection is ok. 
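The guard added to leave() (full hunk below) reduces to this short sketch, using the names from the patch:

	/* Only a node that still owns a slot and has not completed its
	 * resync (recovery_cp != MaxSector) needs another node to take
	 * over, and the message can only be delivered while the DLM
	 * connection is still up, i.e. before the lockspace is released.
	 */
	if (cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector)
		resync_bitmap(mddev);	/* sends BITMAP_NEEDS_SYNC */
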
Signed-off-by: Guoqing Jiang Signed-off-by: NeilBrown --- drivers/md/md-cluster.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index b94a2e68ef43..51e8552be73a 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -774,12 +774,32 @@ err: return ret; } +static void resync_bitmap(struct mddev *mddev) +{ + struct md_cluster_info *cinfo = mddev->cluster_info; + struct cluster_msg cmsg = {0}; + int err; + + cmsg.type = cpu_to_le32(BITMAP_NEEDS_SYNC); + err = sendmsg(cinfo, &cmsg); + if (err) + pr_err("%s:%d: failed to send BITMAP_NEEDS_SYNC message (%d)\n", + __func__, __LINE__, err); +} + static int leave(struct mddev *mddev) { struct md_cluster_info *cinfo = mddev->cluster_info; if (!cinfo) return 0; + + /* BITMAP_NEEDS_SYNC message should be sent when node + * is leaving the cluster with dirty bitmap, also we + * can only deliver it when dlm connection is available */ + if (cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) + resync_bitmap(mddev); + md_unregister_thread(&cinfo->recovery_thread); md_unregister_thread(&cinfo->recv_thread); lockres_free(cinfo->message_lockres); -- cgit v1.2.3 From b8ca846e45197a2de829def27254f833a998723e Mon Sep 17 00:00:00 2001 From: Goldwyn Rodrigues Date: Fri, 9 Oct 2015 11:27:01 -0500 Subject: md-cluster: Wake up suspended process When the suspended_area is deleted, the suspended processes must be woken up in order to complete their I/O. Signed-off-by: Goldwyn Rodrigues --- drivers/md/md-cluster.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 51e8552be73a..58eadc06a1b6 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -366,11 +366,13 @@ static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot) } } -static void remove_suspend_info(struct md_cluster_info *cinfo, int slot) +static void remove_suspend_info(struct mddev *mddev, int slot) { + struct md_cluster_info *cinfo = mddev->cluster_info; spin_lock_irq(&cinfo->suspend_lock); __remove_suspend_info(cinfo, slot); spin_unlock_irq(&cinfo->suspend_lock); + mddev->pers->quiesce(mddev, 2); } @@ -381,7 +383,7 @@ static void process_suspend_info(struct mddev *mddev, struct suspend_info *s; if (!hi) { - remove_suspend_info(cinfo, slot); + remove_suspend_info(mddev, slot); return; } s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL); @@ -397,6 +399,7 @@ static void process_suspend_info(struct mddev *mddev, __remove_suspend_info(cinfo, slot); list_add(&s->list, &cinfo->suspend_list); spin_unlock_irq(&cinfo->suspend_lock); + mddev->pers->quiesce(mddev, 2); } static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg) -- cgit v1.2.3 From 2910ff17d154baa5eb50e362a91104e831eb2bb6 Mon Sep 17 00:00:00 2001 From: Goldwyn Rodrigues Date: Mon, 28 Sep 2015 10:27:26 -0500 Subject: md: remove_and_add_spares() to activate specific rdev remove_and_add_spares() checks for all devices to activate spare. Change it to activate a specific device if a non-null rdev argument is passed. remove_and_add_spares() can be used to activate spares in slot_store() as well. 
For hot_remove_disk(), check if rdev->raid_disk == -1 before calling remove_and_add_spares() Signed-off-by: Goldwyn Rodrigues --- drivers/md/md.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 9798a9921a38..e21a2feed826 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2691,15 +2691,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len) rdev->saved_raid_disk = -1; clear_bit(In_sync, &rdev->flags); clear_bit(Bitmap_sync, &rdev->flags); - err = rdev->mddev->pers-> - hot_add_disk(rdev->mddev, rdev); - if (err) { - rdev->raid_disk = -1; - return err; - } else - sysfs_notify_dirent_safe(rdev->sysfs_state); - if (sysfs_link_rdev(rdev->mddev, rdev)) - /* failure here is OK */; + remove_and_add_spares(rdev->mddev, rdev); + if (rdev->raid_disk == -1) + return -EBUSY; /* don't wakeup anyone, leave that to userspace. */ } else { if (slot >= rdev->mddev->raid_disks && @@ -6004,12 +5998,16 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev) if (mddev_is_clustered(mddev)) md_cluster_ops->metadata_update_start(mddev); + if (rdev->raid_disk < 0) + goto kick_rdev; + clear_bit(Blocked, &rdev->flags); remove_and_add_spares(mddev, rdev); if (rdev->raid_disk >= 0) goto busy; +kick_rdev: if (mddev_is_clustered(mddev)) md_cluster_ops->remove_disk(mddev, rdev); @@ -6024,6 +6022,7 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev) busy: if (mddev_is_clustered(mddev)) md_cluster_ops->metadata_update_cancel(mddev); + printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", bdevname(rdev->bdev,b), mdname(mddev)); return -EBUSY; @@ -8018,10 +8017,12 @@ static int remove_and_add_spares(struct mddev *mddev, if (removed && mddev->kobj.sd) sysfs_notify(&mddev->kobj, NULL, "degraded"); - if (this) + if (this && removed) goto no_add; rdev_for_each(rdev, mddev) { + if (this && this != rdev) + continue; if (rdev->raid_disk >= 0 && !test_bit(In_sync, &rdev->flags) && !test_bit(Faulty, &rdev->flags)) -- cgit v1.2.3 From 70bcecdb1534a7dcd82503b705c27a048d568c9d Mon Sep 17 00:00:00 2001 From: Goldwyn Rodrigues Date: Fri, 21 Aug 2015 10:33:39 -0500 Subject: md-cluster: Improve md_reload_sb to be less error prone md_reload_sb is too simplistic and it explicitly needs to determine the changes made by the writing node. However, there are multiple areas where a simple reload could fail. Instead, read the superblock of one of the "good" rdevs and update the necessary information: - read the superblock into a newly allocated page, by temporarily swapping out rdev->sb_page and calling ->load_super. - if that fails return - if it succeeds, call check_sb_changes 1. iterates over list of active devices and checks the matching dev_roles[] value. If that is 'faulty', the device must be marked as faulty - call md_error to mark the device as faulty. Make sure not to set CHANGE_DEVS and wakeup mddev->thread or else it would initiate a resync process, which is the responsibility of the "primary" node. - clear the Blocked bit - Call remove_and_add_spares() to hot remove the device. If the device is 'spare': - call remove_and_add_spares() to get the number of spares added in this operation. - Reduce mddev->degraded to mark the array as not degraded. 2. reset recovery_cp - read the rest of the rdevs to update recovery_offset. 
If recovery_offset is equal to MaxSector, call spare_active() to set it In_sync This required that recovery_offset be initialized to MaxSector, as opposed to zero so as to communicate the end of sync for a rdev. Signed-off-by: Goldwyn Rodrigues --- drivers/md/md-cluster.c | 27 ++++++----- drivers/md/md.c | 121 ++++++++++++++++++++++++++++++++++++++++++------ drivers/md/md.h | 2 +- drivers/md/raid1.c | 9 ++++ 4 files changed, 133 insertions(+), 26 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 58eadc06a1b6..2eb3a5019a63 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -427,8 +427,7 @@ static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg) static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg) { struct md_cluster_info *cinfo = mddev->cluster_info; - - md_reload_sb(mddev); + md_reload_sb(mddev, le32_to_cpu(msg->raid_slot)); dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR); } @@ -834,11 +833,23 @@ static int metadata_update_finish(struct mddev *mddev) { struct md_cluster_info *cinfo = mddev->cluster_info; struct cluster_msg cmsg; - int ret; + struct md_rdev *rdev; + int ret = 0; memset(&cmsg, 0, sizeof(cmsg)); cmsg.type = cpu_to_le32(METADATA_UPDATED); - ret = __sendmsg(cinfo, &cmsg); + cmsg.raid_slot = -1; + /* Pick up a good active device number to send. + */ + rdev_for_each(rdev, mddev) + if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) { + cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); + break; + } + if (cmsg.raid_slot >= 0) + ret = __sendmsg(cinfo, &cmsg); + else + pr_warn("md-cluster: No good device id found to send\n"); unlock_comm(cinfo); return ret; } @@ -922,15 +933,9 @@ static int add_new_disk_start(struct mddev *mddev, struct md_rdev *rdev) static int add_new_disk_finish(struct mddev *mddev) { - struct cluster_msg cmsg; - struct md_cluster_info *cinfo = mddev->cluster_info; - int ret; /* Write sb and inform others */ md_update_sb(mddev, 1); - cmsg.type = METADATA_UPDATED; - ret = __sendmsg(cinfo, &cmsg); - unlock_comm(cinfo); - return ret; + return metadata_update_finish(mddev); } static int new_disk_ack(struct mddev *mddev, bool ack) diff --git a/drivers/md/md.c b/drivers/md/md.c index e21a2feed826..12cc28ab9a41 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -8924,25 +8924,118 @@ err_wq: return ret; } -void md_reload_sb(struct mddev *mddev) +static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) { - struct md_rdev *rdev, *tmp; + struct mdp_superblock_1 *sb = page_address(rdev->sb_page); + struct md_rdev *rdev2; + int role, ret; + char b[BDEVNAME_SIZE]; - rdev_for_each_safe(rdev, tmp, mddev) { - rdev->sb_loaded = 0; - ClearPageUptodate(rdev->sb_page); + /* Check for change of roles in the active devices */ + rdev_for_each(rdev2, mddev) { + if (test_bit(Faulty, &rdev2->flags)) + continue; + + /* Check if the roles changed */ + role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]); + if (role != rdev2->raid_disk) { + /* got activated */ + if (rdev2->raid_disk == -1 && role != 0xffff) { + rdev2->saved_raid_disk = role; + ret = remove_and_add_spares(mddev, rdev2); + pr_info("Activated spare: %s\n", + bdevname(rdev2->bdev,b)); + continue; + } + /* device faulty + * We just want to do the minimum to mark the disk + * as faulty. The recovery is performed by the + * one who initiated the error. 
+ */ + if ((role == 0xfffe) || (role == 0xfffd)) { + md_error(mddev, rdev2); + clear_bit(Blocked, &rdev2->flags); + } + } } - mddev->raid_disks = 0; - analyze_sbs(mddev); - rdev_for_each_safe(rdev, tmp, mddev) { - struct mdp_superblock_1 *sb = page_address(rdev->sb_page); - /* since we don't write to faulty devices, we figure out if the - * disk is faulty by comparing events - */ - if (mddev->events > sb->events) - set_bit(Faulty, &rdev->flags); + + /* recovery_cp changed */ + if (le64_to_cpu(sb->resync_offset) != mddev->recovery_cp) + mddev->recovery_cp = le64_to_cpu(sb->resync_offset); + + /* Finally set the event to be up to date */ + mddev->events = le64_to_cpu(sb->events); +} + +static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) +{ + int err; + struct page *swapout = rdev->sb_page; + struct mdp_superblock_1 *sb; + + /* Store the sb page of the rdev in the swapout temporary + * variable in case we err in the future + */ + rdev->sb_page = NULL; + alloc_disk_sb(rdev); + ClearPageUptodate(rdev->sb_page); + rdev->sb_loaded = 0; + err = super_types[mddev->major_version].load_super(rdev, NULL, mddev->minor_version); + + if (err < 0) { + pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n", + __func__, __LINE__, rdev->desc_nr, err); + put_page(rdev->sb_page); + rdev->sb_page = swapout; + rdev->sb_loaded = 1; + return err; } + sb = page_address(rdev->sb_page); + /* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET + * is not set + */ + + if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET)) + rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); + + /* The other node finished recovery, call spare_active to set + * device In_sync and mddev->degraded + */ + if (rdev->recovery_offset == MaxSector && + !test_bit(In_sync, &rdev->flags) && + mddev->pers->spare_active(mddev)) + sysfs_notify(&mddev->kobj, NULL, "degraded"); + + put_page(swapout); + return 0; +} + +void md_reload_sb(struct mddev *mddev, int nr) +{ + struct md_rdev *rdev; + int err; + + /* Find the rdev */ + rdev_for_each_rcu(rdev, mddev) { + if (rdev->desc_nr == nr) + break; + } + + if (!rdev || rdev->desc_nr != nr) { + pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr); + return; + } + + err = read_rdev(mddev, rdev); + if (err < 0) + return; + + check_sb_changes(mddev, rdev); + + /* Read all rdev's to update recovery_offset */ + rdev_for_each_rcu(rdev, mddev) + read_rdev(mddev, rdev); } EXPORT_SYMBOL(md_reload_sb); diff --git a/drivers/md/md.h b/drivers/md/md.h index ab339571e57f..2ea00356bb23 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -658,7 +658,7 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, struct mddev *mddev); extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule); -extern void md_reload_sb(struct mddev *mddev); +extern void md_reload_sb(struct mddev *mddev, int raid_disk); extern void md_update_sb(struct mddev *mddev, int force); extern void md_kick_rdev_from_array(struct md_rdev * rdev); struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1dd13bb52940..b54fefc85b66 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1592,6 +1592,15 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) if (rdev->raid_disk >= 0) first = last = rdev->raid_disk; + /* + * find the disk ... but prefer rdev->saved_raid_disk + * if possible. 
+ */ + if (rdev->saved_raid_disk >= 0 && + rdev->saved_raid_disk >= first && + conf->mirrors[rdev->saved_raid_disk].rdev == NULL) + first = last = rdev->saved_raid_disk; + for (mirror = first; mirror <= last; mirror++) { p = conf->mirrors+mirror; if (!p->rdev) { -- cgit v1.2.3 From 2aa82191ac36cd2f2a41aa25697db30ed7c619ef Mon Sep 17 00:00:00 2001 From: Goldwyn Rodrigues Date: Mon, 28 Sep 2015 19:21:35 -0500 Subject: md-cluster: Perform a lazy update In a clustered environment, a change such as marking a device faulty, can be recorded by any of the nodes. This is communicated to all the nodes and re-recording such a change is unnecessary, and quite often pretty disruptive. With this patch, just before the update, we detect for the changes and if the changes are already in superblock, we abort the update after clearing all the flags Signed-off-by: Goldwyn Rodrigues --- drivers/md/md.c | 101 ++++++++++++++++++++++++++++++++------------------------ 1 file changed, 57 insertions(+), 44 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 12cc28ab9a41..5f0967803dc7 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2199,6 +2199,46 @@ static void sync_sbs(struct mddev *mddev, int nospares) } } +static bool does_sb_need_changing(struct mddev *mddev) +{ + struct md_rdev *rdev; + struct mdp_superblock_1 *sb; + int role; + + /* Find a good rdev */ + rdev_for_each(rdev, mddev) + if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags)) + break; + + /* No good device found. */ + if (!rdev) + return false; + + sb = page_address(rdev->sb_page); + /* Check if a device has become faulty or a spare become active */ + rdev_for_each(rdev, mddev) { + role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); + /* Device activated? */ + if (role == 0xffff && rdev->raid_disk >=0 && + !test_bit(Faulty, &rdev->flags)) + return true; + /* Device turned faulty? 
*/ + if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd)) + return true; + } + + /* Check if any mddev parameters have changed */ + if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || + (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || + (mddev->recovery_cp != le64_to_cpu(sb->resync_offset)) || + (mddev->layout != le64_to_cpu(sb->layout)) || + (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || + (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) + return true; + + return false; +} + void md_update_sb(struct mddev *mddev, int force_change) { struct md_rdev *rdev; @@ -2211,6 +2251,18 @@ void md_update_sb(struct mddev *mddev, int force_change) set_bit(MD_CHANGE_DEVS, &mddev->flags); return; } + + if (mddev_is_clustered(mddev)) { + if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) + force_change = 1; + md_cluster_ops->metadata_update_start(mddev); + /* Has someone else has updated the sb */ + if (!does_sb_need_changing(mddev)) { + md_cluster_ops->metadata_update_cancel(mddev); + clear_bit(MD_CHANGE_PENDING, &mddev->flags); + return; + } + } repeat: /* First make sure individual recovery_offsets are correct */ rdev_for_each(rdev, mddev) { @@ -2359,6 +2411,9 @@ repeat: clear_bit(BlockedBadBlocks, &rdev->flags); wake_up(&rdev->blocked_wait); } + + if (mddev_is_clustered(mddev)) + md_cluster_ops->metadata_update_finish(mddev); } EXPORT_SYMBOL(md_update_sb); @@ -2496,13 +2551,9 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) if (mddev_is_clustered(mddev)) md_cluster_ops->remove_disk(mddev, rdev); md_kick_rdev_from_array(rdev); - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_start(mddev); if (mddev->pers) md_update_sb(mddev, 1); md_new_event(mddev); - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_finish(mddev); err = 0; } } else if (cmd_match(buf, "writemostly")) { @@ -4063,12 +4114,8 @@ size_store(struct mddev *mddev, const char *buf, size_t len) if (err) return err; if (mddev->pers) { - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_start(mddev); err = update_size(mddev, sectors); md_update_sb(mddev, 1); - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_finish(mddev); } else { if (mddev->dev_sectors == 0 || mddev->dev_sectors > sectors) @@ -5306,8 +5353,6 @@ static void md_clean(struct mddev *mddev) static void __md_stop_writes(struct mddev *mddev) { - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_start(mddev); set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); flush_workqueue(md_misc_wq); if (mddev->sync_thread) { @@ -5326,8 +5371,6 @@ static void __md_stop_writes(struct mddev *mddev) mddev->in_sync = 1; md_update_sb(mddev, 1); } - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_finish(mddev); } void md_stop_writes(struct mddev *mddev) @@ -6015,9 +6058,6 @@ kick_rdev: md_update_sb(mddev, 1); md_new_event(mddev); - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_finish(mddev); - return 0; busy: if (mddev_is_clustered(mddev)) @@ -6073,14 +6113,12 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) goto abort_export; } - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_start(mddev); clear_bit(In_sync, &rdev->flags); rdev->desc_nr = -1; rdev->saved_raid_disk = -1; err = bind_rdev_to_array(rdev, mddev); if (err) - goto abort_clustered; + goto abort_export; /* * The rest should better be atomic, we can have disk failures @@ -6090,9 +6128,6 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) rdev->raid_disk 
= -1; md_update_sb(mddev, 1); - - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_finish(mddev); /* * Kick recovery, maybe this spare has to be added to the * array immediately. @@ -6102,9 +6137,6 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) md_new_event(mddev); return 0; -abort_clustered: - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_cancel(mddev); abort_export: export_rdev(rdev); return err; @@ -6422,8 +6454,6 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) return rv; } } - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_start(mddev); if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) rv = update_size(mddev, (sector_t)info->size * 2); @@ -6481,12 +6511,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) } } md_update_sb(mddev, 1); - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_finish(mddev); return rv; err: - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_cancel(mddev); return rv; } @@ -7599,11 +7625,7 @@ int md_allow_write(struct mddev *mddev) mddev->safemode == 0) mddev->safemode = 1; spin_unlock(&mddev->lock); - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_start(mddev); md_update_sb(mddev, 0); - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_finish(mddev); sysfs_notify_dirent_safe(mddev->sysfs_state); } else spin_unlock(&mddev->lock); @@ -8182,13 +8204,8 @@ void md_check_recovery(struct mddev *mddev) sysfs_notify_dirent_safe(mddev->sysfs_state); } - if (mddev->flags & MD_UPDATE_SB_FLAGS) { - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_start(mddev); + if (mddev->flags & MD_UPDATE_SB_FLAGS) md_update_sb(mddev, 0); - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_finish(mddev); - } if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { @@ -8286,8 +8303,6 @@ void md_reap_sync_thread(struct mddev *mddev) set_bit(MD_CHANGE_DEVS, &mddev->flags); } } - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_start(mddev); if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && mddev->pers->finish_reshape) mddev->pers->finish_reshape(mddev); @@ -8300,8 +8315,6 @@ void md_reap_sync_thread(struct mddev *mddev) rdev->saved_raid_disk = -1; md_update_sb(mddev, 1); - if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_finish(mddev); clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); clear_bit(MD_RECOVERY_DONE, &mddev->recovery); clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); -- cgit v1.2.3 From c186b128cda5a246da25f474e4689cb2bfacfcac Mon Sep 17 00:00:00 2001 From: Goldwyn Rodrigues Date: Wed, 30 Sep 2015 13:20:35 -0500 Subject: md-cluster: Perform resync/recovery under a DLM lock Resync or recovery must be performed by only one node at a time. A DLM lock resource, resync_lockres provides the mutual exclusion so that only one node performs the recovery/resync at a time. If a node is unable to get the resync_lockres, because recovery is being performed by another node, it set MD_RECOVER_NEEDED so as to schedule recovery in the future. Remove the debug message in resync_info_update() used during development. 
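The mutual exclusion described above is a plain DLM try-lock; roughly, as a sketch based on the resync_start() and md_start_sync() hunks below:

	/* Request resync_lockres in EX mode without queueing: if another
	 * node already holds it, dlm_lock_sync() returns -EAGAIN instead
	 * of blocking.
	 */
	cinfo->resync_lockres->flags |= DLM_LKF_NOQUEUE;
	ret = dlm_lock_sync(cinfo->resync_lockres, DLM_LOCK_EX);
	if (ret == -EAGAIN) {
		/* another node is resyncing; as described above, recovery
		 * is deferred (MD_RECOVERY_NEEDED) and retried later */
	}
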
Signed-off-by: Goldwyn Rodrigues --- drivers/md/md-cluster.c | 29 ++++++++++++++++++++++++++--- drivers/md/md-cluster.h | 2 ++ drivers/md/md.c | 29 +++++++++++++++++++++++++---- drivers/md/raid1.c | 2 -- 4 files changed, 53 insertions(+), 9 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 2eb3a5019a63..e1ce9c9a0473 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -55,6 +55,7 @@ struct md_cluster_info { struct completion completion; struct mutex sb_mutex; struct dlm_lock_resource *bitmap_lockres; + struct dlm_lock_resource *resync_lockres; struct list_head suspend_list; spinlock_t suspend_lock; struct md_thread *recovery_thread; @@ -384,6 +385,8 @@ static void process_suspend_info(struct mddev *mddev, if (!hi) { remove_suspend_info(mddev, slot); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); return; } s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL); @@ -758,6 +761,10 @@ static int join(struct mddev *mddev, int nodes) goto err; } + cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0); + if (!cinfo->resync_lockres) + goto err; + ret = gather_all_resync_info(mddev, nodes); if (ret) goto err; @@ -768,6 +775,7 @@ err: lockres_free(cinfo->token_lockres); lockres_free(cinfo->ack_lockres); lockres_free(cinfo->no_new_dev_lockres); + lockres_free(cinfo->resync_lockres); lockres_free(cinfo->bitmap_lockres); if (cinfo->lockspace) dlm_release_lockspace(cinfo->lockspace, 2); @@ -861,6 +869,13 @@ static int metadata_update_cancel(struct mddev *mddev) return dlm_unlock_sync(cinfo->token_lockres); } +static int resync_start(struct mddev *mddev) +{ + struct md_cluster_info *cinfo = mddev->cluster_info; + cinfo->resync_lockres->flags |= DLM_LKF_NOQUEUE; + return dlm_lock_sync(cinfo->resync_lockres, DLM_LOCK_EX); +} + static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi) { struct md_cluster_info *cinfo = mddev->cluster_info; @@ -870,16 +885,22 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi) add_resync_info(mddev, cinfo->bitmap_lockres, lo, hi); /* Re-acquire the lock to refresh LVB */ dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW); - pr_info("%s:%d lo: %llu hi: %llu\n", __func__, __LINE__, - (unsigned long long)lo, - (unsigned long long)hi); cmsg.type = cpu_to_le32(RESYNCING); cmsg.slot = cpu_to_le32(slot); cmsg.low = cpu_to_le64(lo); cmsg.high = cpu_to_le64(hi); + return sendmsg(cinfo, &cmsg); } +static int resync_finish(struct mddev *mddev) +{ + struct md_cluster_info *cinfo = mddev->cluster_info; + cinfo->resync_lockres->flags &= ~DLM_LKF_NOQUEUE; + dlm_unlock_sync(cinfo->resync_lockres); + return resync_info_update(mddev, 0, 0); +} + static int area_resyncing(struct mddev *mddev, int direction, sector_t lo, sector_t hi) { @@ -995,6 +1016,8 @@ static struct md_cluster_operations cluster_ops = { .join = join, .leave = leave, .slot_number = slot_number, + .resync_start = resync_start, + .resync_finish = resync_finish, .resync_info_update = resync_info_update, .metadata_update_start = metadata_update_start, .metadata_update_finish = metadata_update_finish, diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h index f5bdc0c86eaa..c94172673599 100644 --- a/drivers/md/md-cluster.h +++ b/drivers/md/md-cluster.h @@ -16,6 +16,8 @@ struct md_cluster_operations { int (*metadata_update_start)(struct mddev *mddev); int (*metadata_update_finish)(struct mddev *mddev); int (*metadata_update_cancel)(struct mddev *mddev); + int 
(*resync_start)(struct mddev *mddev); + int (*resync_finish)(struct mddev *mddev); int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi); int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev); int (*add_new_disk_finish)(struct mddev *mddev); diff --git a/drivers/md/md.c b/drivers/md/md.c index 5f0967803dc7..61e897def04f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7657,6 +7657,7 @@ void md_do_sync(struct md_thread *thread) struct md_rdev *rdev; char *desc, *action = NULL; struct blk_plug plug; + bool cluster_resync_finished = false; /* just incase thread restarts... */ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) @@ -7959,7 +7960,11 @@ void md_do_sync(struct md_thread *thread) mddev->curr_resync_completed = mddev->curr_resync; sysfs_notify(&mddev->kobj, NULL, "sync_completed"); } - /* tell personality that we are finished */ + /* tell personality and other nodes that we are finished */ + if (mddev_is_clustered(mddev)) { + md_cluster_ops->resync_finish(mddev); + cluster_resync_finished = true; + } mddev->pers->sync_request(mddev, max_sectors, &skipped); if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && @@ -7997,6 +8002,11 @@ void md_do_sync(struct md_thread *thread) skip: set_bit(MD_CHANGE_DEVS, &mddev->flags); + if (mddev_is_clustered(mddev) && + test_bit(MD_RECOVERY_INTR, &mddev->recovery) && + !cluster_resync_finished) + md_cluster_ops->resync_finish(mddev); + spin_lock(&mddev->lock); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { /* We completed so min/max setting can be forgotten if used. */ @@ -8078,14 +8088,25 @@ no_add: static void md_start_sync(struct work_struct *ws) { struct mddev *mddev = container_of(ws, struct mddev, del_work); + int ret = 0; + + if (mddev_is_clustered(mddev)) { + ret = md_cluster_ops->resync_start(mddev); + if (ret) { + mddev->sync_thread = NULL; + goto out; + } + } mddev->sync_thread = md_register_thread(md_do_sync, mddev, "resync"); +out: if (!mddev->sync_thread) { - printk(KERN_ERR "%s: could not start resync" - " thread...\n", - mdname(mddev)); + if (!(mddev_is_clustered(mddev) && ret == -EAGAIN)) + printk(KERN_ERR "%s: could not start resync" + " thread...\n", + mdname(mddev)); /* leave the spares where they are, it shouldn't hurt */ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index b54fefc85b66..a2d813c9eabd 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2503,8 +2503,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp if (mddev_is_clustered(mddev)) { conf->cluster_sync_low = 0; conf->cluster_sync_high = 0; - /* Send zeros to mark end of resync */ - md_cluster_ops->resync_info_update(mddev, 0, 0); } return 0; } -- cgit v1.2.3 From dbb64f8635f5d68192108b88759a34633a4bd558 Mon Sep 17 00:00:00 2001 From: Goldwyn Rodrigues Date: Thu, 1 Oct 2015 13:20:27 -0500 Subject: md-cluster: Fix adding of new disk with new reload code Adding the disk worked incorrectly with the new reload code. Fix it: - No operation should be performed on rdev marked as Candidate - After a metadata update operation, kick disk if role is 0xfffe else clear Candidate bit and continue with the regular change check. - Saving the mode of the lock resource to check if token lock is already locked, because it can be called twice while adding a disk. However, unlock_comm() must be called only once. - add_new_disk() is called by the node initiating the --add operation. 
If it needs to be canceled, call add_new_disk_cancel(). The operation is completed by md_update_sb() which will write and unlock the communication. Signed-off-by: Goldwyn Rodrigues --- drivers/md/md-cluster.c | 35 +++++++++++++++++++++++---------- drivers/md/md-cluster.h | 6 +++--- drivers/md/md.c | 52 ++++++++++++++++++++++++++++--------------------- 3 files changed, 58 insertions(+), 35 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index e1ce9c9a0473..28494e9f8d02 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -28,6 +28,7 @@ struct dlm_lock_resource { struct completion completion; /* completion for synchronized locking */ void (*bast)(void *arg, int mode); /* blocking AST function pointer*/ struct mddev *mddev; /* pointing back to mddev. */ + int mode; }; struct suspend_info { @@ -107,6 +108,8 @@ static int dlm_lock_sync(struct dlm_lock_resource *res, int mode) if (ret) return ret; wait_for_completion(&res->completion); + if (res->lksb.sb_status == 0) + res->mode = mode; return res->lksb.sb_status; } @@ -128,6 +131,7 @@ static struct dlm_lock_resource *lockres_init(struct mddev *mddev, init_completion(&res->completion); res->ls = cinfo->lockspace; res->mddev = mddev; + res->mode = DLM_LOCK_IV; namelen = strlen(name); res->name = kzalloc(namelen + 1, GFP_KERNEL); if (!res->name) { @@ -536,11 +540,17 @@ static void recv_daemon(struct md_thread *thread) /* lock_comm() * Takes the lock on the TOKEN lock resource so no other * node can communicate while the operation is underway. + * If called again, and the TOKEN lock is alread in EX mode + * return success. However, care must be taken that unlock_comm() + * is called only once. */ static int lock_comm(struct md_cluster_info *cinfo) { int error; + if (cinfo->token_lockres->mode == DLM_LOCK_EX) + return 0; + error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX); if (error) pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n", @@ -550,6 +560,7 @@ static int lock_comm(struct md_cluster_info *cinfo) static void unlock_comm(struct md_cluster_info *cinfo) { + WARN_ON(cinfo->token_lockres->mode != DLM_LOCK_EX); dlm_unlock_sync(cinfo->token_lockres); } @@ -862,11 +873,10 @@ static int metadata_update_finish(struct mddev *mddev) return ret; } -static int metadata_update_cancel(struct mddev *mddev) +static void metadata_update_cancel(struct mddev *mddev) { struct md_cluster_info *cinfo = mddev->cluster_info; - - return dlm_unlock_sync(cinfo->token_lockres); + unlock_comm(cinfo); } static int resync_start(struct mddev *mddev) @@ -925,7 +935,11 @@ out: return ret; } -static int add_new_disk_start(struct mddev *mddev, struct md_rdev *rdev) +/* add_new_disk() - initiates a disk add + * However, if this fails before writing md_update_sb(), + * add_new_disk_cancel() must be called to release token lock + */ +static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev) { struct md_cluster_info *cinfo = mddev->cluster_info; struct cluster_msg cmsg; @@ -947,16 +961,17 @@ static int add_new_disk_start(struct mddev *mddev, struct md_rdev *rdev) /* Some node does not "see" the device */ if (ret == -EAGAIN) ret = -ENOENT; + if (ret) + unlock_comm(cinfo); else dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR); return ret; } -static int add_new_disk_finish(struct mddev *mddev) +static void add_new_disk_cancel(struct mddev *mddev) { - /* Write sb and inform others */ - md_update_sb(mddev, 1); - return metadata_update_finish(mddev); + struct md_cluster_info *cinfo = 
mddev->cluster_info; + unlock_comm(cinfo); } static int new_disk_ack(struct mddev *mddev, bool ack) @@ -1023,8 +1038,8 @@ static struct md_cluster_operations cluster_ops = { .metadata_update_finish = metadata_update_finish, .metadata_update_cancel = metadata_update_cancel, .area_resyncing = area_resyncing, - .add_new_disk_start = add_new_disk_start, - .add_new_disk_finish = add_new_disk_finish, + .add_new_disk = add_new_disk, + .add_new_disk_cancel = add_new_disk_cancel, .new_disk_ack = new_disk_ack, .remove_disk = remove_disk, .gather_bitmaps = gather_bitmaps, diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h index c94172673599..e75ea2613184 100644 --- a/drivers/md/md-cluster.h +++ b/drivers/md/md-cluster.h @@ -15,12 +15,12 @@ struct md_cluster_operations { int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi); int (*metadata_update_start)(struct mddev *mddev); int (*metadata_update_finish)(struct mddev *mddev); - int (*metadata_update_cancel)(struct mddev *mddev); + void (*metadata_update_cancel)(struct mddev *mddev); int (*resync_start)(struct mddev *mddev); int (*resync_finish)(struct mddev *mddev); int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi); - int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev); - int (*add_new_disk_finish)(struct mddev *mddev); + int (*add_new_disk)(struct mddev *mddev, struct md_rdev *rdev); + void (*add_new_disk_cancel)(struct mddev *mddev); int (*new_disk_ack)(struct mddev *mddev, bool ack); int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev); int (*gather_bitmaps)(struct md_rdev *rdev); diff --git a/drivers/md/md.c b/drivers/md/md.c index 61e897def04f..8a6f67f55d3d 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3246,14 +3246,6 @@ static void analyze_sbs(struct mddev *mddev) md_kick_rdev_from_array(rdev); continue; } - /* No device should have a Candidate flag - * when reading devices - */ - if (test_bit(Candidate, &rdev->flags)) { - pr_info("md: kicking Cluster Candidate %s from array!\n", - bdevname(rdev->bdev, b)); - md_kick_rdev_from_array(rdev); - } } if (mddev->level == LEVEL_MULTIPATH) { rdev->desc_nr = i++; @@ -5950,19 +5942,12 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) * check whether the device shows up in other nodes */ if (mddev_is_clustered(mddev)) { - if (info->state & (1 << MD_DISK_CANDIDATE)) { - /* Through --cluster-confirm */ + if (info->state & (1 << MD_DISK_CANDIDATE)) set_bit(Candidate, &rdev->flags); - err = md_cluster_ops->new_disk_ack(mddev, true); - if (err) { - export_rdev(rdev); - return err; - } - } else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { + else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { /* --add initiated by this node */ - err = md_cluster_ops->add_new_disk_start(mddev, rdev); + err = md_cluster_ops->add_new_disk(mddev, rdev); if (err) { - md_cluster_ops->add_new_disk_finish(mddev); export_rdev(rdev); return err; } @@ -5971,13 +5956,23 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) rdev->raid_disk = -1; err = bind_rdev_to_array(rdev, mddev); + if (err) export_rdev(rdev); - else + + if (mddev_is_clustered(mddev)) { + if (info->state & (1 << MD_DISK_CANDIDATE)) + md_cluster_ops->new_disk_ack(mddev, (err == 0)); + else { + if (err) + md_cluster_ops->add_new_disk_cancel(mddev); + else + err = add_bound_rdev(rdev); + } + + } else if (!err) err = add_bound_rdev(rdev); - if (mddev_is_clustered(mddev) && - (info->state & (1 << MD_DISK_CLUSTER_ADD))) - 
md_cluster_ops->add_new_disk_finish(mddev); + return err; } @@ -8055,6 +8050,8 @@ static int remove_and_add_spares(struct mddev *mddev, rdev_for_each(rdev, mddev) { if (this && this != rdev) continue; + if (test_bit(Candidate, &rdev->flags)) + continue; if (rdev->raid_disk >= 0 && !test_bit(In_sync, &rdev->flags) && !test_bit(Faulty, &rdev->flags)) @@ -8972,6 +8969,17 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) /* Check if the roles changed */ role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]); + + if (test_bit(Candidate, &rdev2->flags)) { + if (role == 0xfffe) { + pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b)); + md_kick_rdev_from_array(rdev2); + continue; + } + else + clear_bit(Candidate, &rdev2->flags); + } + if (role != rdev2->raid_disk) { /* got activated */ if (rdev2->raid_disk == -1 && role != 0xffff) { -- cgit v1.2.3 From d216711bed1a0fb6527858f32cd89ff4a6553a97 Mon Sep 17 00:00:00 2001 From: Goldwyn Rodrigues Date: Mon, 12 Oct 2015 10:42:34 -0500 Subject: md-cluster: Do not printk() every received message The receive daemon prints kernel messages for every network message received. This would fill the kernel message log with unnecessary messages. Remove the pr_info() messages. Signed-off-by: Goldwyn Rodrigues --- drivers/md/md-cluster.c | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 28494e9f8d02..85d0a1fef807 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -363,8 +363,6 @@ static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot) list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list) if (slot == s->slot) { - pr_info("%s:%d Deleting suspend_info: %d\n", - __func__, __LINE__, slot); list_del(&s->list); kfree(s); break; @@ -462,34 +460,22 @@ static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg) { switch (msg->type) { case METADATA_UPDATED: - pr_info("%s: %d Received message: METADATA_UPDATE from %d\n", - __func__, __LINE__, msg->slot); process_metadata_update(mddev, msg); break; case RESYNCING: - pr_info("%s: %d Received message: RESYNCING from %d\n", - __func__, __LINE__, msg->slot); process_suspend_info(mddev, msg->slot, msg->low, msg->high); break; case NEWDISK: - pr_info("%s: %d Received message: NEWDISK from %d\n", - __func__, __LINE__, msg->slot); process_add_new_disk(mddev, msg); break; case REMOVE: - pr_info("%s: %d Received REMOVE from %d\n", - __func__, __LINE__, msg->slot); process_remove_disk(mddev, msg); break; case RE_ADD: - pr_info("%s: %d Received RE_ADD from %d\n", - __func__, __LINE__, msg->slot); process_readd_disk(mddev, msg); break; case BITMAP_NEEDS_SYNC: - pr_info("%s: %d Received BITMAP_NEEDS_SYNC from %d\n", - __func__, __LINE__, msg->slot); __recover_slot(mddev, msg->slot); break; default: -- cgit v1.2.3 From faeff83fa478b4dca9877d6b10a25ad252891f14 Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Mon, 12 Oct 2015 17:21:21 +0800 Subject: md-cluster: make other members of cluster_msg is handled by little endian funcs Signed-off-by: Guoqing Jiang --- drivers/md/md-cluster.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 85d0a1fef807..3c1818516070 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -418,7 +418,7 @@ static void process_add_new_disk(struct mddev *mddev, struct 
cluster_msg *cmsg) len = snprintf(disk_uuid, 64, "DEVICE_UUID="); sprintf(disk_uuid + len, "%pU", cmsg->uuid); - snprintf(raid_slot, 16, "RAID_DISK=%d", cmsg->raid_slot); + snprintf(raid_slot, 16, "RAID_DISK=%d", le32_to_cpu(cmsg->raid_slot)); pr_info("%s:%d Sending kobject change with %s and %s\n", __func__, __LINE__, disk_uuid, raid_slot); init_completion(&cinfo->newdisk_completion); set_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state); @@ -438,22 +438,26 @@ static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg) { - struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, msg->raid_slot); + struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, + le32_to_cpu(msg->raid_slot)); if (rdev) md_kick_rdev_from_array(rdev); else - pr_warn("%s: %d Could not find disk(%d) to REMOVE\n", __func__, __LINE__, msg->raid_slot); + pr_warn("%s: %d Could not find disk(%d) to REMOVE\n", + __func__, __LINE__, le32_to_cpu(msg->raid_slot)); } static void process_readd_disk(struct mddev *mddev, struct cluster_msg *msg) { - struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, msg->raid_slot); + struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, + le32_to_cpu(msg->raid_slot)); if (rdev && test_bit(Faulty, &rdev->flags)) clear_bit(Faulty, &rdev->flags); else - pr_warn("%s: %d Could not find disk(%d) which is faulty", __func__, __LINE__, msg->raid_slot); + pr_warn("%s: %d Could not find disk(%d) which is faulty", + __func__, __LINE__, le32_to_cpu(msg->raid_slot)); } static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg) @@ -936,7 +940,7 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev) memset(&cmsg, 0, sizeof(cmsg)); cmsg.type = cpu_to_le32(NEWDISK); memcpy(cmsg.uuid, uuid, 16); - cmsg.raid_slot = rdev->desc_nr; + cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); lock_comm(cinfo); ret = __sendmsg(cinfo, &cmsg); if (ret) @@ -979,8 +983,8 @@ static int remove_disk(struct mddev *mddev, struct md_rdev *rdev) { struct cluster_msg cmsg; struct md_cluster_info *cinfo = mddev->cluster_info; - cmsg.type = REMOVE; - cmsg.raid_slot = rdev->desc_nr; + cmsg.type = cpu_to_le32(REMOVE); + cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); return __sendmsg(cinfo, &cmsg); } @@ -992,8 +996,8 @@ static int gather_bitmaps(struct md_rdev *rdev) struct mddev *mddev = rdev->mddev; struct md_cluster_info *cinfo = mddev->cluster_info; - cmsg.type = RE_ADD; - cmsg.raid_slot = rdev->desc_nr; + cmsg.type = cpu_to_le32(RE_ADD); + cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); err = sendmsg(cinfo, &cmsg); if (err) goto out; -- cgit v1.2.3 From 487cf9142c434530443b7bb8c545e9d0f30391b1 Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Mon, 12 Oct 2015 17:21:22 +0800 Subject: md-cluster: remove unnecessary setting for slot Since slot will be set within _sendmsg, we can remove the redundant code in resync_info_update. 
Signed-off-by: Guoqing Jiang --- drivers/md/md-cluster.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 3c1818516070..ba80df923605 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -880,13 +880,11 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi) { struct md_cluster_info *cinfo = mddev->cluster_info; struct cluster_msg cmsg; - int slot = cinfo->slot_number - 1; add_resync_info(mddev, cinfo->bitmap_lockres, lo, hi); /* Re-acquire the lock to refresh LVB */ dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW); cmsg.type = cpu_to_le32(RESYNCING); - cmsg.slot = cpu_to_le32(slot); cmsg.low = cpu_to_le64(lo); cmsg.high = cpu_to_le64(hi); -- cgit v1.2.3 From 256f5b245aab93bfa2d8b86d66545fceea05408e Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Mon, 12 Oct 2015 17:21:23 +0800 Subject: md-cluster: make sure the node do not receive it's own msg During the past test, the node occasionally received the msg which is sent from itself, this case should not happen in theory, but it is better to avoid it in case something wrong happened. Signed-off-by: Guoqing Jiang Signed-off-by: Goldwyn Rodrigues --- drivers/md/md-cluster.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index ba80df923605..8bddd7882292 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -462,6 +462,9 @@ static void process_readd_disk(struct mddev *mddev, struct cluster_msg *msg) static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg) { + if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot), + "node %d received it's own msg\n", le32_to_cpu(msg->slot))) + return; switch (msg->type) { case METADATA_UPDATED: process_metadata_update(mddev, msg); -- cgit v1.2.3 From aee177ac5a4225ac4eeed0aa5edf2338c2d713a4 Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Mon, 12 Oct 2015 17:21:24 +0800 Subject: md-cluster: zero cmsg before it was sent Signed-off-by: Guoqing Jiang --- drivers/md/md-cluster.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 8bddd7882292..c7b8027cebdd 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -882,7 +882,7 @@ static int resync_start(struct mddev *mddev) static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi) { struct md_cluster_info *cinfo = mddev->cluster_info; - struct cluster_msg cmsg; + struct cluster_msg cmsg = {0}; add_resync_info(mddev, cinfo->bitmap_lockres, lo, hi); /* Re-acquire the lock to refresh LVB */ @@ -982,7 +982,7 @@ static int new_disk_ack(struct mddev *mddev, bool ack) static int remove_disk(struct mddev *mddev, struct md_rdev *rdev) { - struct cluster_msg cmsg; + struct cluster_msg cmsg = {0}; struct md_cluster_info *cinfo = mddev->cluster_info; cmsg.type = cpu_to_le32(REMOVE); cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); @@ -993,7 +993,7 @@ static int gather_bitmaps(struct md_rdev *rdev) { int sn, err; sector_t lo, hi; - struct cluster_msg cmsg; + struct cluster_msg cmsg = {0}; struct mddev *mddev = rdev->mddev; struct md_cluster_info *cinfo = mddev->cluster_info; -- cgit v1.2.3 From 86b572770e7964f006d438c4e05008914e9db79b Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Mon, 12 Oct 2015 17:21:25 +0800 Subject: md-cluster: Add 'SUSE' as author for md-cluster.c Signed-off-by: 
Guoqing Jiang --- drivers/md/md-cluster.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index c7b8027cebdd..35ac2e8cb7f1 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -1051,5 +1051,6 @@ static void cluster_exit(void) module_init(cluster_init); module_exit(cluster_exit); +MODULE_AUTHOR("SUSE"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Clustering support for MD"); -- cgit v1.2.3 From a9720903d1415317e18f439917f760ec592f3e3b Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Mon, 12 Oct 2015 17:21:27 +0800 Subject: md-cluster: only call kick_rdev_from_array after remove disk successfully For cluster raid, we should not kick it from array if the disk can't be remove from array successfully. Signed-off-by: Guoqing Jiang Signed-off-by: Goldwyn Rodrigues --- drivers/md/md.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 8a6f67f55d3d..d39a72aec316 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2548,13 +2548,16 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) err = -EBUSY; else { struct mddev *mddev = rdev->mddev; - if (mddev_is_clustered(mddev)) - md_cluster_ops->remove_disk(mddev, rdev); - md_kick_rdev_from_array(rdev); - if (mddev->pers) - md_update_sb(mddev, 1); - md_new_event(mddev); err = 0; + if (mddev_is_clustered(mddev)) + err = md_cluster_ops->remove_disk(mddev, rdev); + + if (err == 0) { + md_kick_rdev_from_array(rdev); + if (mddev->pers) + md_update_sb(mddev, 1); + md_new_event(mddev); + } } } else if (cmd_match(buf, "writemostly")) { set_bit(WriteMostly, &rdev->flags); -- cgit v1.2.3 From 23b63f9fa82eed128b5c585cbfe10ced82d73e91 Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Mon, 12 Oct 2015 17:21:30 +0800 Subject: md: check the return value for metadata_update_start We shouldn't run related funs of md_cluster_ops in case metadata_update_start returned failure. 
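The resulting pattern in md_update_sb() and hot_remove_disk() condenses to roughly the following (a sketch of the change below, not the complete functions):

	int ret = -1;

	if (mddev_is_clustered(mddev))
		ret = md_cluster_ops->metadata_update_start(mddev);

	/* ... write out the superblocks / remove the disk ... */

	/* only call the paired finish/cancel hooks when start succeeded */
	if (mddev_is_clustered(mddev) && ret == 0)
		md_cluster_ops->metadata_update_finish(mddev);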
Signed-off-by: Guoqing Jiang --- drivers/md/md.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index d39a72aec316..a71b36f0acb0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2245,6 +2245,7 @@ void md_update_sb(struct mddev *mddev, int force_change) int sync_req; int nospares = 0; int any_badblocks_changed = 0; + int ret = -1; if (mddev->ro) { if (force_change) @@ -2255,10 +2256,11 @@ void md_update_sb(struct mddev *mddev, int force_change) if (mddev_is_clustered(mddev)) { if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) force_change = 1; - md_cluster_ops->metadata_update_start(mddev); + ret = md_cluster_ops->metadata_update_start(mddev); /* Has someone else has updated the sb */ if (!does_sb_need_changing(mddev)) { - md_cluster_ops->metadata_update_cancel(mddev); + if (ret == 0) + md_cluster_ops->metadata_update_cancel(mddev); clear_bit(MD_CHANGE_PENDING, &mddev->flags); return; } @@ -2412,7 +2414,7 @@ repeat: wake_up(&rdev->blocked_wait); } - if (mddev_is_clustered(mddev)) + if (mddev_is_clustered(mddev) && ret == 0) md_cluster_ops->metadata_update_finish(mddev); } EXPORT_SYMBOL(md_update_sb); @@ -6031,13 +6033,14 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev) { char b[BDEVNAME_SIZE]; struct md_rdev *rdev; + int ret = -1; rdev = find_rdev(mddev, dev); if (!rdev) return -ENXIO; if (mddev_is_clustered(mddev)) - md_cluster_ops->metadata_update_start(mddev); + ret = md_cluster_ops->metadata_update_start(mddev); if (rdev->raid_disk < 0) goto kick_rdev; @@ -6049,7 +6052,7 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev) goto busy; kick_rdev: - if (mddev_is_clustered(mddev)) + if (mddev_is_clustered(mddev) && ret == 0) md_cluster_ops->remove_disk(mddev, rdev); md_kick_rdev_from_array(rdev); @@ -6058,7 +6061,7 @@ kick_rdev: return 0; busy: - if (mddev_is_clustered(mddev)) + if (mddev_is_clustered(mddev) && ret == 0) md_cluster_ops->metadata_update_cancel(mddev); printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", -- cgit v1.2.3 From ba2746b0facf83c62bebf8aed8f0a3fd446de612 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 16 Oct 2015 13:48:35 +1100 Subject: md-cluster: metadata_update_finish: consistently use cmsg.raid_slot as le32 As cmsg.raid_slot is le32, comparing for >0 is not meaningful. So introduce cpu-endian 'raid_slot' and only assign to cmsg.raid_slot when we know value is valid. Reported-by: kbuild test robot Signed-off-by: NeilBrown --- drivers/md/md-cluster.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 35ac2e8cb7f1..aad5e7cda523 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -847,20 +847,21 @@ static int metadata_update_finish(struct mddev *mddev) struct cluster_msg cmsg; struct md_rdev *rdev; int ret = 0; + int raid_slot = -1; memset(&cmsg, 0, sizeof(cmsg)); cmsg.type = cpu_to_le32(METADATA_UPDATED); - cmsg.raid_slot = -1; /* Pick up a good active device number to send. 
*/ rdev_for_each(rdev, mddev) if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) { - cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); + raid_slot = rdev->desc_nr; break; } - if (cmsg.raid_slot >= 0) + if (raid_slot >= 0) { + cmsg.raid_slot = cpu_to_le32(raid_slot); ret = __sendmsg(cinfo, &cmsg); - else + } else pr_warn("md-cluster: No good device id found to send\n"); unlock_comm(cinfo); return ret; -- cgit v1.2.3 From cf97a348c8f700e448588bad4de3504d8d65c2f9 Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Fri, 16 Oct 2015 15:40:22 +0800 Subject: md-cluster: Fix warnings when build with CF=-D__CHECK_ENDIAN__ This patches fixes sparse warnings like incorrect type in assignment (different base types), cast to restricted __le64. Reported-by: kbuild test robot Signed-off-by: Guoqing Jiang Signed-off-by: NeilBrown --- drivers/md/md-cluster.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index aad5e7cda523..f42162c49e3b 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -81,13 +81,13 @@ enum msg_type { }; struct cluster_msg { - int type; - int slot; + __le32 type; + __le32 slot; /* TODO: Unionize this for smaller footprint */ - sector_t low; - sector_t high; + __le64 low; + __le64 high; char uuid[16]; - int raid_slot; + __le32 raid_slot; }; static void sync_ast(void *arg) @@ -215,7 +215,7 @@ static struct suspend_info *read_resync_info(struct mddev *mddev, struct dlm_loc dlm_lock_sync(lockres, DLM_LOCK_CR); memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info)); hi = le64_to_cpu(ri.hi); - if (ri.hi > 0) { + if (hi > 0) { s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL); if (!s) goto out; @@ -465,13 +465,14 @@ static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg) if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot), "node %d received it's own msg\n", le32_to_cpu(msg->slot))) return; - switch (msg->type) { + switch (le32_to_cpu(msg->type)) { case METADATA_UPDATED: process_metadata_update(mddev, msg); break; case RESYNCING: - process_suspend_info(mddev, msg->slot, - msg->low, msg->high); + process_suspend_info(mddev, le32_to_cpu(msg->slot), + le64_to_cpu(msg->low), + le64_to_cpu(msg->high)); break; case NEWDISK: process_add_new_disk(mddev, msg); @@ -483,7 +484,7 @@ static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg) process_readd_disk(mddev, msg); break; case BITMAP_NEEDS_SYNC: - __recover_slot(mddev, msg->slot); + __recover_slot(mddev, le32_to_cpu(msg->slot)); break; default: pr_warn("%s:%d Received unknown message from %d\n", -- cgit v1.2.3 From 823815238f5e2bfd3783602ca9c50da9fe41c778 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 19 Oct 2015 15:36:07 +1100 Subject: md-cluster: discard unused sb_mutex. 
Signed-off-by: NeilBrown --- drivers/md/md-cluster.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index f42162c49e3b..02a0fb2e8441 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -54,7 +54,6 @@ struct md_cluster_info { dlm_lockspace_t *lockspace; int slot_number; struct completion completion; - struct mutex sb_mutex; struct dlm_lock_resource *bitmap_lockres; struct dlm_lock_resource *resync_lockres; struct list_head suspend_list; @@ -709,7 +708,6 @@ static int join(struct mddev *mddev, int nodes) init_completion(&cinfo->completion); set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state); - mutex_init(&cinfo->sb_mutex); mddev->cluster_info = cinfo; memset(str, 0, 64); -- cgit v1.2.3 From 2e2a7cd96f5a076db4d4a4bd564652d37dd034c2 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 19 Oct 2015 15:42:18 +1100 Subject: md-cluster: don't cast void pointers when assigning them. It is common practice in the kernel to leave out this case. It isn't needed and adds little if any value. Signed-off-by: NeilBrown --- drivers/md/md-cluster.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 02a0fb2e8441..818bcccb2962 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -93,7 +93,7 @@ static void sync_ast(void *arg) { struct dlm_lock_resource *res; - res = (struct dlm_lock_resource *) arg; + res = arg; complete(&res->completion); } @@ -349,7 +349,7 @@ static const struct dlm_lockspace_ops md_ls_ops = { */ static void ack_bast(void *arg, int mode) { - struct dlm_lock_resource *res = (struct dlm_lock_resource *)arg; + struct dlm_lock_resource *res = arg; struct md_cluster_info *cinfo = res->mddev->cluster_info; if (mode == DLM_LOCK_EX) -- cgit v1.2.3 From 30661b49be784e8eecde60330ad7a8bdeb5291b1 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 19 Oct 2015 15:44:00 +1100 Subject: md-cluster: remove mddev arg from add_resync_info() The arg isn't used, so its presence is only confusing. Signed-off-by: NeilBrown --- drivers/md/md-cluster.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 818bcccb2962..d6a1126d85ce 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -195,8 +195,8 @@ retry: kfree(res); } -static void add_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres, - sector_t lo, sector_t hi) +static void add_resync_info(struct dlm_lock_resource *lockres, + sector_t lo, sector_t hi) { struct resync_info *ri; @@ -884,7 +884,7 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi) struct md_cluster_info *cinfo = mddev->cluster_info; struct cluster_msg cmsg = {0}; - add_resync_info(mddev, cinfo->bitmap_lockres, lo, hi); + add_resync_info(cinfo->bitmap_lockres, lo, hi); /* Re-acquire the lock to refresh LVB */ dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW); cmsg.type = cpu_to_le32(RESYNCING); -- cgit v1.2.3 From 28c1b9fdf4562b52fe104384b16238c39c8a8d40 Mon Sep 17 00:00:00 2001 From: Goldwyn Rodrigues Date: Thu, 22 Oct 2015 16:01:25 +1100 Subject: md-cluster: Call update_raid_disks() if another node --grow's raid_disks To incorporate --grow feature executed on one node, other nodes need to acknowledge the change in number of disks. Call update_raid_disks() to update internal data structures. 
This leads to call check_reshape() -> md_allow_write() -> md_update_sb(), this results in a deadlock. This is done so it can safely allocate memory (which might trigger writeback which might write to raid1). This is not required for md with a bitmap. In the clustered case, we don't perform md_update_sb() in md_allow_write(), but in do_md_run(). Also we disable safemode for clustered mode. mddev->recovery_cp need not be set in check_sb_changes() because this is required only when a node reads another node's bitmap. mddev->recovery_cp (which is read from sb->resync_offset), is set only if mddev is in_sync. Since we disabled safemode, in_sync is set to zero. In a clustered environment, the MD may not be in sync because another node could be writing to it. So make sure that in_sync is not set in case of clustered node in __md_stop_writes(). Signed-off-by: Goldwyn Rodrigues Signed-off-by: NeilBrown --- drivers/md/md.c | 25 ++++++++++++++++++------- drivers/md/raid1.c | 8 +++++--- 2 files changed, 23 insertions(+), 10 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index a71b36f0acb0..44d034246723 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2230,7 +2230,6 @@ static bool does_sb_need_changing(struct mddev *mddev) /* Check if any mddev parameters have changed */ if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || - (mddev->recovery_cp != le64_to_cpu(sb->resync_offset)) || (mddev->layout != le64_to_cpu(sb->layout)) || (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) @@ -3314,6 +3313,11 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) { unsigned long msec; + if (mddev_is_clustered(mddev)) { + pr_info("md: Safemode is disabled for clustered mode\n"); + return -EINVAL; + } + if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) return -EINVAL; if (msec == 0) @@ -5224,7 +5228,10 @@ int md_run(struct mddev *mddev) atomic_set(&mddev->max_corr_read_errors, MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); mddev->safemode = 0; - mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ + if (mddev_is_clustered(mddev)) + mddev->safemode_delay = 0; + else + mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ mddev->in_sync = 1; smp_wmb(); spin_lock(&mddev->lock); @@ -5267,6 +5274,9 @@ static int do_md_run(struct mddev *mddev) goto out; } + if (mddev_is_clustered(mddev)) + md_allow_write(mddev); + md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ @@ -5363,9 +5373,11 @@ static void __md_stop_writes(struct mddev *mddev) md_super_wait(mddev); if (mddev->ro == 0 && - (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) { + ((!mddev->in_sync && !mddev_is_clustered(mddev)) || + (mddev->flags & MD_UPDATE_SB_FLAGS))) { /* mark array as shutdown cleanly */ - mddev->in_sync = 1; + if (!mddev_is_clustered(mddev)) + mddev->in_sync = 1; md_update_sb(mddev, 1); } } @@ -9007,9 +9019,8 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) } } - /* recovery_cp changed */ - if (le64_to_cpu(sb->resync_offset) != mddev->recovery_cp) - mddev->recovery_cp = le64_to_cpu(sb->resync_offset); + if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) + update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); /* Finally set the event to be up to date */ mddev->events = le64_to_cpu(sb->events); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 
ce2d797f8787..c1ad0b075807 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -3044,9 +3044,11 @@ static int raid1_reshape(struct mddev *mddev) return -EINVAL; } - err = md_allow_write(mddev); - if (err) - return err; + if (!mddev_is_clustered(mddev)) { + err = md_allow_write(mddev); + if (err) + return err; + } raid_disks = mddev->raid_disks + mddev->delta_disks; -- cgit v1.2.3 From c4d4c91b44d8309082127893221a1971a27c50ca Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 13 Aug 2015 14:31:54 -0700 Subject: MD: replace special disk roles with macros Add the following two macros for special roles: spare and faulty MD_DISK_ROLE_SPARE 0xffff MD_DISK_ROLE_FAULTY 0xfffe Add MD_DISK_ROLE_MAX 0xff00 as the maximal possible regular role, and minimal value of special role. Signed-off-by: Song Liu Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/md.c | 14 +++++++------- include/uapi/linux/raid/md_p.h | 4 ++++ 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 44d034246723..cfe5c8704a26 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1608,7 +1608,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) ++ev1; if (rdev->desc_nr >= 0 && rdev->desc_nr < le32_to_cpu(sb->max_dev) && - le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe) + le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX) if (ev1 < mddev->events) return -EINVAL; } else if (mddev->bitmap) { @@ -1628,14 +1628,14 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) int role; if (rdev->desc_nr < 0 || rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { - role = 0xffff; + role = MD_DISK_ROLE_SPARE; rdev->desc_nr = -1; } else role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); switch(role) { - case 0xffff: /* spare */ + case MD_DISK_ROLE_SPARE: /* spare */ break; - case 0xfffe: /* faulty */ + case MD_DISK_ROLE_FAULTY: /* faulty */ set_bit(Faulty, &rdev->flags); break; default: @@ -1788,18 +1788,18 @@ retry: max_dev = le32_to_cpu(sb->max_dev); for (i=0; idev_roles[i] = cpu_to_le16(0xfffe); + sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); rdev_for_each(rdev2, mddev) { i = rdev2->desc_nr; if (test_bit(Faulty, &rdev2->flags)) - sb->dev_roles[i] = cpu_to_le16(0xfffe); + sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); else if (test_bit(In_sync, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); else if (rdev2->raid_disk >= 0) sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); else - sb->dev_roles[i] = cpu_to_le16(0xffff); + sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); } sb->sb_csum = calc_sb_1_csum(sb); diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index 867ee874fa80..d1fc8a637368 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -90,6 +90,10 @@ * dire need */ +#define MD_DISK_ROLE_SPARE 0xffff +#define MD_DISK_ROLE_FAULTY 0xfffe +#define MD_DISK_ROLE_MAX 0xff00 /* max value of regular disk role */ + typedef struct mdp_device_descriptor_s { __u32 number; /* 0 Device number in the entire set */ __u32 major; /* 1 Device major number */ -- cgit v1.2.3 From bac624f3f86a8c7db395c7f85ccad6a504b9c4b4 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 13 Aug 2015 14:31:55 -0700 Subject: MD: add a new disk role to present write journal device Next patches will use a disk as raid5/6 journaling. We need a new disk role to present the journal device and add MD_FEATURE_JOURNAL to feature_map for backward compability. 
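Condensed from the hunks below, the writer and reader sides pair up roughly as follows (a sketch, not the complete code):

	/* super_1_sync(): advertise a journal member */
	else if (test_bit(Journal, &rdev2->flags)) {
		sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
		sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
	}

	/* super_1_validate(): accept the role only when the feature bit is set */
	case MD_DISK_ROLE_JOURNAL:
		if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL))
			return -EINVAL;	/* journal role without journal feature */
		set_bit(Journal, &rdev->flags);
		break;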
Signed-off-by: Song Liu Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/md.c | 23 +++++++++++++++++++++-- drivers/md/md.h | 5 +++++ include/uapi/linux/raid/md_p.h | 3 +++ 3 files changed, 29 insertions(+), 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index cfe5c8704a26..391341a772c7 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1638,6 +1638,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) case MD_DISK_ROLE_FAULTY: /* faulty */ set_bit(Faulty, &rdev->flags); break; + case MD_DISK_ROLE_JOURNAL: /* journal device */ + if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) { + /* journal device without journal feature */ + printk(KERN_WARNING + "md: journal device provided without journal feature, ignoring the device\n"); + return -EINVAL; + } + set_bit(Journal, &rdev->flags); + break; default: rdev->saved_raid_disk = role; if ((le32_to_cpu(sb->feature_map) & @@ -1796,7 +1805,10 @@ retry: sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); else if (test_bit(In_sync, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); - else if (rdev2->raid_disk >= 0) + else if (test_bit(Journal, &rdev2->flags)) { + sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL); + sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); + } else if (rdev2->raid_disk >= 0) sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); else sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); @@ -5840,7 +5852,8 @@ static int get_disk_info(struct mddev *mddev, void __user * arg) else if (test_bit(In_sync, &rdev->flags)) { info.state |= (1<flags)) + info.state |= (1<flags)) info.state |= (1<flags); + if (info->state & (1<flags); /* * check whether the device shows up in other nodes */ @@ -7330,6 +7345,10 @@ static int md_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "(F)"); continue; } + if (test_bit(Journal, &rdev->flags)) { + seq_printf(seq, "(J)"); + continue; + } if (rdev->raid_disk < 0) seq_printf(seq, "(S)"); /* spare */ if (test_bit(Replacement, &rdev->flags)) diff --git a/drivers/md/md.h b/drivers/md/md.h index 2ea00356bb23..88dc6312f5d5 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -172,6 +172,11 @@ enum flag_bits { * This device is seen locally but not * by the whole cluster */ + Journal, /* This device is used as journal for + * raid-5/6. + * Usually, this device should be faster + * than other devices in the array + */ }; #define BB_LEN_MASK (0x00000000000001FFULL) diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index d1fc8a637368..eaaab52077a3 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -89,9 +89,11 @@ * read requests will only be sent here in * dire need */ +#define MD_DISK_JOURNAL 18 /* disk is used as the write journal in RAID-5/6 */ #define MD_DISK_ROLE_SPARE 0xffff #define MD_DISK_ROLE_FAULTY 0xfffe +#define MD_DISK_ROLE_JOURNAL 0xfffd #define MD_DISK_ROLE_MAX 0xff00 /* max value of regular disk role */ typedef struct mdp_device_descriptor_s { @@ -307,6 +309,7 @@ struct mdp_superblock_1 { * is guided by bitmap. 
*/ #define MD_FEATURE_CLUSTERED 256 /* clustered MD */ +#define MD_FEATURE_JOURNAL 512 /* support write cache */ #define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \ |MD_FEATURE_RECOVERY_OFFSET \ |MD_FEATURE_RESHAPE_ACTIVE \ -- cgit v1.2.3 From 3069aa8def32b0c2b83cd27d1c37ed30b47ce879 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 13 Aug 2015 14:31:56 -0700 Subject: md: override md superblock recovery_offset for journal device Journal device stores data in a log structure. We need record the log start. Here we override md superblock recovery_offset for this purpose. This field of a journal device is meaningless otherwise. Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/md.c | 6 ++++++ drivers/md/md.h | 8 +++++++- include/uapi/linux/raid/md_p.h | 5 ++++- 3 files changed, 17 insertions(+), 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 391341a772c7..3592beb6931e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1646,6 +1646,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) return -EINVAL; } set_bit(Journal, &rdev->flags); + rdev->journal_tail = le64_to_cpu(sb->journal_tail); break; default: rdev->saved_raid_disk = role; @@ -1721,6 +1722,9 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP); } + /* Note: recovery_offset and journal_tail share space */ + if (test_bit(Journal, &rdev->flags)) + sb->journal_tail = cpu_to_le64(rdev->journal_tail); if (test_bit(Replacement, &rdev->flags)) sb->feature_map |= cpu_to_le32(MD_FEATURE_REPLACEMENT); @@ -8097,6 +8101,8 @@ static int remove_and_add_spares(struct mddev *mddev, continue; if (test_bit(Faulty, &rdev->flags)) continue; + if (test_bit(Journal, &rdev->flags)) + continue; if (mddev->ro && ! (rdev->saved_raid_disk >= 0 && !test_bit(Bitmap_sync, &rdev->flags))) diff --git a/drivers/md/md.h b/drivers/md/md.h index 88dc6312f5d5..2b0f62fb6146 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -87,10 +87,16 @@ struct md_rdev { * array and could again if we did a partial * resync from the bitmap */ - sector_t recovery_offset;/* If this device has been partially + union { + sector_t recovery_offset;/* If this device has been partially * recovered, this is where we were * up to. */ + sector_t journal_tail; /* If this device is a journal device, + * this is the journal tail (journal + * recovery start point) + */ + }; atomic_t nr_pending; /* number of pending requests. 
* only maintained for arrays that diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index eaaab52077a3..a5f54ff26c20 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -258,7 +258,10 @@ struct mdp_superblock_1 { __le64 data_offset; /* sector start of data, often 0 */ __le64 data_size; /* sectors in this device that can be used for data */ __le64 super_offset; /* sector start of this superblock */ - __le64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */ + union { + __le64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */ + __le64 journal_tail;/* journal tail of journal device (from data_offset) */ + }; __le32 dev_number; /* permanent identifier of this device - not role in raid */ __le32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */ __u8 device_uuid[16]; /* user-space setable, ignored by kernel */ -- cgit v1.2.3 From 6d036f7d52e5a9c3b2ff77883db4c34620681804 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 13 Aug 2015 14:31:57 -0700 Subject: raid5: export some functions Next several patches use some raid5 functions, rename them with raid5 prefix and export out. Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5.c | 100 ++++++++++++++++++++++++++--------------------------- drivers/md/raid5.h | 8 +++++ 2 files changed, 57 insertions(+), 51 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 5b79770c4f08..b200c195160c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -353,7 +353,7 @@ static void release_inactive_stripe_list(struct r5conf *conf, struct list_head *list = &temp_inactive_list[size - 1]; /* - * We don't hold any lock here yet, get_active_stripe() might + * We don't hold any lock here yet, raid5_get_active_stripe() might * remove stripes from the list */ if (!list_empty_careful(list)) { @@ -413,7 +413,7 @@ static int release_stripe_list(struct r5conf *conf, return count; } -static void release_stripe(struct stripe_head *sh) +void raid5_release_stripe(struct stripe_head *sh) { struct r5conf *conf = sh->raid_conf; unsigned long flags; @@ -658,9 +658,9 @@ static int has_failed(struct r5conf *conf) return 0; } -static struct stripe_head * -get_active_stripe(struct r5conf *conf, sector_t sector, - int previous, int noblock, int noquiesce) +struct stripe_head * +raid5_get_active_stripe(struct r5conf *conf, sector_t sector, + int previous, int noblock, int noquiesce) { struct stripe_head *sh; int hash = stripe_hash_locks_hash(sector); @@ -858,7 +858,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh unlock_out: unlock_two_stripes(head, sh); out: - release_stripe(head); + raid5_release_stripe(head); } /* Determine if 'data_offset' or 'new_data_offset' should be used @@ -1208,7 +1208,7 @@ static void ops_complete_biofill(void *stripe_head_ref) return_io(&return_bi); set_bit(STRIPE_HANDLE, &sh->state); - release_stripe(sh); + raid5_release_stripe(sh); } static void ops_run_biofill(struct stripe_head *sh) @@ -1271,7 +1271,7 @@ static void ops_complete_compute(void *stripe_head_ref) if (sh->check_state == check_state_compute_run) sh->check_state = check_state_compute_result; set_bit(STRIPE_HANDLE, &sh->state); - release_stripe(sh); + raid5_release_stripe(sh); } /* return a pointer to the address conversion region of the scribble buffer */ @@ -1697,7 +1697,7 @@ static void ops_complete_reconstruct(void 
*stripe_head_ref) } set_bit(STRIPE_HANDLE, &sh->state); - release_stripe(sh); + raid5_release_stripe(sh); } static void @@ -1855,7 +1855,7 @@ static void ops_complete_check(void *stripe_head_ref) sh->check_state = check_state_check_result; set_bit(STRIPE_HANDLE, &sh->state); - release_stripe(sh); + raid5_release_stripe(sh); } static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) @@ -2017,7 +2017,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) /* we just created an active stripe so... */ atomic_inc(&conf->active_stripes); - release_stripe(sh); + raid5_release_stripe(sh); conf->max_nr_stripes++; return 1; } @@ -2236,7 +2236,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) if (!p) err = -ENOMEM; } - release_stripe(nsh); + raid5_release_stripe(nsh); } /* critical section pass, GFP_NOIO no longer needed */ @@ -2394,7 +2394,7 @@ static void raid5_end_read_request(struct bio * bi) rdev_dec_pending(rdev, conf->mddev); clear_bit(R5_LOCKED, &sh->dev[i].flags); set_bit(STRIPE_HANDLE, &sh->state); - release_stripe(sh); + raid5_release_stripe(sh); } static void raid5_end_write_request(struct bio *bi) @@ -2468,14 +2468,12 @@ static void raid5_end_write_request(struct bio *bi) if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) clear_bit(R5_LOCKED, &sh->dev[i].flags); set_bit(STRIPE_HANDLE, &sh->state); - release_stripe(sh); + raid5_release_stripe(sh); if (sh->batch_head && sh != sh->batch_head) - release_stripe(sh->batch_head); + raid5_release_stripe(sh->batch_head); } -static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); - static void raid5_build_block(struct stripe_head *sh, int i, int previous) { struct r5dev *dev = &sh->dev[i]; @@ -2491,7 +2489,7 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous) dev->rreq.bi_private = sh; dev->flags = 0; - dev->sector = compute_blocknr(sh, i, previous); + dev->sector = raid5_compute_blocknr(sh, i, previous); } static void error(struct mddev *mddev, struct md_rdev *rdev) @@ -2524,9 +2522,9 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) * Input: a 'big' sector number, * Output: index of the data and parity disk, and the sector # in them. */ -static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, - int previous, int *dd_idx, - struct stripe_head *sh) +sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, + int previous, int *dd_idx, + struct stripe_head *sh) { sector_t stripe, stripe2; sector_t chunk_number; @@ -2726,7 +2724,7 @@ static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, return new_sector; } -static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) +sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) { struct r5conf *conf = sh->raid_conf; int raid_disks = sh->disks; @@ -3937,10 +3935,10 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) struct stripe_head *sh2; struct async_submit_ctl submit; - sector_t bn = compute_blocknr(sh, i, 1); + sector_t bn = raid5_compute_blocknr(sh, i, 1); sector_t s = raid5_compute_sector(conf, bn, 0, &dd_idx, NULL); - sh2 = get_active_stripe(conf, s, 0, 1, 1); + sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1); if (sh2 == NULL) /* so far only the early blocks of this stripe * have been requested. 
When later blocks @@ -3950,7 +3948,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) if (!test_bit(STRIPE_EXPANDING, &sh2->state) || test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { /* must have already done this block */ - release_stripe(sh2); + raid5_release_stripe(sh2); continue; } @@ -3971,7 +3969,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) set_bit(STRIPE_EXPAND_READY, &sh2->state); set_bit(STRIPE_HANDLE, &sh2->state); } - release_stripe(sh2); + raid5_release_stripe(sh2); } /* done submitting copies, wait for them to complete */ @@ -4257,7 +4255,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh, if (handle_flags == 0 || sh->state & handle_flags) set_bit(STRIPE_HANDLE, &sh->state); - release_stripe(sh); + raid5_release_stripe(sh); } spin_lock_irq(&head_sh->stripe_lock); head_sh->batch_head = NULL; @@ -4504,7 +4502,7 @@ static void handle_stripe(struct stripe_head *sh) /* Finish reconstruct operations initiated by the expansion process */ if (sh->reconstruct_state == reconstruct_state_result) { struct stripe_head *sh_src - = get_active_stripe(conf, sh->sector, 1, 1, 1); + = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { /* sh cannot be written until sh_src has been read. * so arrange for sh to be delayed a little @@ -4514,11 +4512,11 @@ static void handle_stripe(struct stripe_head *sh) if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh_src->state)) atomic_inc(&conf->preread_active_stripes); - release_stripe(sh_src); + raid5_release_stripe(sh_src); goto finish; } if (sh_src) - release_stripe(sh_src); + raid5_release_stripe(sh_src); sh->reconstruct_state = reconstruct_state_idle; clear_bit(STRIPE_EXPANDING, &sh->state); @@ -5010,7 +5008,7 @@ static void release_stripe_plug(struct mddev *mddev, struct raid5_plug_cb *cb; if (!blk_cb) { - release_stripe(sh); + raid5_release_stripe(sh); return; } @@ -5026,7 +5024,7 @@ static void release_stripe_plug(struct mddev *mddev, if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) list_add_tail(&sh->lru, &cb->list); else - release_stripe(sh); + raid5_release_stripe(sh); } static void make_discard_request(struct mddev *mddev, struct bio *bi) @@ -5061,12 +5059,12 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) DEFINE_WAIT(w); int d; again: - sh = get_active_stripe(conf, logical_sector, 0, 0, 0); + sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); if (test_bit(STRIPE_SYNCING, &sh->state)) { - release_stripe(sh); + raid5_release_stripe(sh); schedule(); goto again; } @@ -5078,7 +5076,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) if (sh->dev[d].towrite || sh->dev[d].toread) { set_bit(R5_Overlap, &sh->dev[d].flags); spin_unlock_irq(&sh->stripe_lock); - release_stripe(sh); + raid5_release_stripe(sh); schedule(); goto again; } @@ -5208,7 +5206,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) (unsigned long long)new_sector, (unsigned long long)logical_sector); - sh = get_active_stripe(conf, new_sector, previous, + sh = raid5_get_active_stripe(conf, new_sector, previous, (bi->bi_rw&RWA_MASK), 0); if (sh) { if (unlikely(previous)) { @@ -5229,7 +5227,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) must_retry = 1; spin_unlock_irq(&conf->device_lock); if (must_retry) { - 
release_stripe(sh); + raid5_release_stripe(sh); schedule(); do_prepare = true; goto retry; @@ -5239,14 +5237,14 @@ static void make_request(struct mddev *mddev, struct bio * bi) /* Might have got the wrong stripe_head * by accident */ - release_stripe(sh); + raid5_release_stripe(sh); goto retry; } if (rw == WRITE && logical_sector >= mddev->suspend_lo && logical_sector < mddev->suspend_hi) { - release_stripe(sh); + raid5_release_stripe(sh); /* As the suspend_* range is controlled by * userspace, we want an interruptible * wait. @@ -5269,7 +5267,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) * and wait a while */ md_wakeup_thread(mddev->thread); - release_stripe(sh); + raid5_release_stripe(sh); schedule(); do_prepare = true; goto retry; @@ -5456,7 +5454,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { int j; int skipped_disk = 0; - sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); + sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); set_bit(STRIPE_EXPANDING, &sh->state); atomic_inc(&conf->reshape_stripes); /* If any of this stripe is beyond the end of the old @@ -5469,7 +5467,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk if (conf->level == 6 && j == sh->qd_idx) continue; - s = compute_blocknr(sh, j, 0); + s = raid5_compute_blocknr(sh, j, 0); if (s < raid5_size(mddev, 0, 0)) { skipped_disk = 1; continue; @@ -5505,10 +5503,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk if (last_sector >= mddev->dev_sectors) last_sector = mddev->dev_sectors - 1; while (first_sector <= last_sector) { - sh = get_active_stripe(conf, first_sector, 1, 0, 1); + sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); set_bit(STRIPE_EXPAND_SOURCE, &sh->state); set_bit(STRIPE_HANDLE, &sh->state); - release_stripe(sh); + raid5_release_stripe(sh); first_sector += STRIPE_SECTORS; } /* Now that the sources are clearly marked, we can release @@ -5517,7 +5515,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk while (!list_empty(&stripes)) { sh = list_entry(stripes.next, struct stripe_head, lru); list_del_init(&sh->lru); - release_stripe(sh); + raid5_release_stripe(sh); } /* If this takes us to the resync_max point where we have to pause, * then we need to write out the superblock. 
@@ -5615,9 +5613,9 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int bitmap_cond_end_sync(mddev->bitmap, sector_nr, false); - sh = get_active_stripe(conf, sector_nr, 0, 1, 0); + sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); if (sh == NULL) { - sh = get_active_stripe(conf, sector_nr, 0, 0, 0); + sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); /* make sure we don't swamp the stripe cache if someone else * is trying to get access */ @@ -5641,7 +5639,7 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int set_bit(STRIPE_SYNC_REQUESTED, &sh->state); set_bit(STRIPE_HANDLE, &sh->state); - release_stripe(sh); + raid5_release_stripe(sh); return STRIPE_SECTORS; } @@ -5680,7 +5678,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) /* already done this stripe */ continue; - sh = get_active_stripe(conf, sector, 0, 1, 1); + sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); if (!sh) { /* failed to get a stripe - must wait */ @@ -5690,7 +5688,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) } if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { - release_stripe(sh); + raid5_release_stripe(sh); raid5_set_bi_processed_stripes(raid_bio, scnt); conf->retry_read_aligned = raid_bio; return handled; @@ -5698,7 +5696,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); handle_stripe(sh); - release_stripe(sh); + raid5_release_stripe(sh); handled++; } remaining = raid5_dec_bi_active_stripes(raid_bio); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 828c2925e68f..7686fcb62157 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -609,4 +609,12 @@ static inline int algorithm_is_DDF(int layout) extern void md_raid5_kick_device(struct r5conf *conf); extern int raid5_set_cache_size(struct mddev *mddev, int size); +extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous); +extern void raid5_release_stripe(struct stripe_head *sh); +extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, + int previous, int *dd_idx, + struct stripe_head *sh); +extern struct stripe_head * +raid5_get_active_stripe(struct r5conf *conf, sector_t sector, + int previous, int noblock, int noquiesce); #endif -- cgit v1.2.3 From b70abcb24711d1327a8a505ab3e931c24cbab0a7 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 13 Aug 2015 14:31:58 -0700 Subject: raid5: add a new state for stripe log handling When a stripe finishes construction, we write the stripe to raid in ops_run_io normally. With log, we do a bunch of other operations before the stripe is written to raid. Mainly write the stripe to log disk, flush disk cache and so on. The operations are still driven by raid5d and run in the stripe state machine. We introduce a new state for such stripe (trapped into log). The stripe is in this state from the time it first enters ops_run_io (finish construction) to the time it is written to raid. Since we know the state is only for log, we bypass other check/operation in handle_stripe. 
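In code, the new state boils down to an early exit in handle_stripe(), paired with set/clear calls in the log path (a sketch of the hunk below together with the log code added by the following patch):

	/* handle_stripe(): a stripe parked in the log needs nothing further yet */
	analyse_stripe(sh, &s);
	if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
		goto finish;

	/* log side: mark the stripe while it lives only in the log ... */
	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/* ... and clear it once the stripe may be written to the raid disks */
	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);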
Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5.c | 3 +++ drivers/md/raid5.h | 1 + 2 files changed, 4 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index b200c195160c..4b789f1f4550 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4316,6 +4316,9 @@ static void handle_stripe(struct stripe_head *sh) analyse_stripe(sh, &s); + if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) + goto finish; + if (s.handle_bad_blocks) { set_bit(STRIPE_HANDLE, &sh->state); goto finish; diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 7686fcb62157..a42c123d15d2 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -340,6 +340,7 @@ enum { STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add * to batch yet. */ + STRIPE_LOG_TRAPPED, /* trapped into log */ }; #define STRIPE_EXPAND_SYNC_FLAGS \ -- cgit v1.2.3 From f6bed0ef0a808164f51197de062e0450ce6c1f96 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 13 Aug 2015 14:31:59 -0700 Subject: raid5: add basic stripe log This introduces a simple log for raid5. Data/parity writing to raid array first writes to the log, then write to raid array disks. If crash happens, we can recovery data from the log. This can speed up raid resync and fix write hole issue. The log structure is pretty simple. Data/meta data is stored in block unit, which is 4k generally. It has only one type of meta data block. The meta data block can track 3 types of data, stripe data, stripe parity and flush block. MD superblock will point to the last valid meta data block. Each meta data block has checksum/seq number, so recovery can scan the log correctly. We store a checksum of stripe data/parity to the metadata block, so meta data and stripe data/parity can be written to log disk together. otherwise, meta data write must wait till stripe data/parity is finished. For stripe data, meta data block will record stripe data sector and size. Currently the size is always 4k. This meta data record can be made simpler if we just fix write hole (eg, we can record data of a stripe's different disks together), but this format can be extended to support caching in the future, which must record data address/size. For stripe parity, meta data block will record stripe sector. It's size should be 4k (for raid5) or 8k (for raid6). We always store p parity first. This format should work for caching too. flush block indicates a stripe is in raid array disks. Fixing write hole doesn't need this type of meta data, it's for caching extension. Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/Makefile | 2 +- drivers/md/raid5-cache.c | 608 +++++++++++++++++++++++++++++++++++++++++ drivers/md/raid5.c | 4 + drivers/md/raid5.h | 9 + include/uapi/linux/raid/md_p.h | 58 ++++ 5 files changed, 680 insertions(+), 1 deletion(-) create mode 100644 drivers/md/raid5-cache.c (limited to 'drivers/md') diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 462f443a4f85..f34979cd141a 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -17,7 +17,7 @@ dm-cache-smq-y += dm-cache-policy-smq.o dm-cache-cleaner-y += dm-cache-policy-cleaner.o dm-era-y += dm-era-target.o md-mod-y += md.o bitmap.o -raid456-y += raid5.o +raid456-y += raid5.o raid5-cache.o # Note: link order is important. 
All raid personalities # and must come before md.o, as they each initialise diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c new file mode 100644 index 000000000000..d18a7700bcb9 --- /dev/null +++ b/drivers/md/raid5-cache.c @@ -0,0 +1,608 @@ +/* + * Copyright (C) 2015 Shaohua Li + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include "md.h" +#include "raid5.h" + +/* + * metadata/data stored in disk with 4k size unit (a block) regardless + * underneath hardware sector size. only works with PAGE_SIZE == 4096 + */ +#define BLOCK_SECTORS (8) + +struct r5l_log { + struct md_rdev *rdev; + + u32 uuid_checksum; + + sector_t device_size; /* log device size, round to + * BLOCK_SECTORS */ + + sector_t last_checkpoint; /* log tail. where recovery scan + * starts from */ + u64 last_cp_seq; /* log tail sequence */ + + sector_t log_start; /* log head. where new data appends */ + u64 seq; /* log head sequence */ + + struct mutex io_mutex; + struct r5l_io_unit *current_io; /* current io_unit accepting new data */ + + spinlock_t io_list_lock; + struct list_head running_ios; /* io_units which are still running, + * and have not yet been completely + * written to the log */ + struct list_head io_end_ios; /* io_units which have been completely + * written to the log but not yet written + * to the RAID */ + + struct kmem_cache *io_kc; + + struct list_head no_space_stripes; /* pending stripes, log has no space */ + spinlock_t no_space_stripes_lock; +}; + +/* + * an IO range starts from a meta data block and end at the next meta data + * block. The io unit's the meta data block tracks data/parity followed it. 
io + * unit is written to log disk with normal write, as we always flush log disk + * first and then start move data to raid disks, there is no requirement to + * write io unit with FLUSH/FUA + */ +struct r5l_io_unit { + struct r5l_log *log; + + struct page *meta_page; /* store meta block */ + int meta_offset; /* current offset in meta_page */ + + struct bio_list bios; + atomic_t pending_io; /* pending bios not written to log yet */ + struct bio *current_bio;/* current_bio accepting new data */ + + atomic_t pending_stripe;/* how many stripes not flushed to raid */ + u64 seq; /* seq number of the metablock */ + sector_t log_start; /* where the io_unit starts */ + sector_t log_end; /* where the io_unit ends */ + struct list_head log_sibling; /* log->running_ios */ + struct list_head stripe_list; /* stripes added to the io_unit */ + + int state; + wait_queue_head_t wait_state; +}; + +/* r5l_io_unit state */ +enum r5l_io_unit_state { + IO_UNIT_RUNNING = 0, /* accepting new IO */ + IO_UNIT_IO_START = 1, /* io_unit bio start writing to log, + * don't accepting new bio */ + IO_UNIT_IO_END = 2, /* io_unit bio finish writing to log */ + IO_UNIT_STRIPE_START = 3, /* stripes of io_unit are flushing to raid */ + IO_UNIT_STRIPE_END = 4, /* stripes data finished writing to raid */ +}; + +static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc) +{ + start += inc; + if (start >= log->device_size) + start = start - log->device_size; + return start; +} + +static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start, + sector_t end) +{ + if (end >= start) + return end - start; + else + return end + log->device_size - start; +} + +static bool r5l_has_free_space(struct r5l_log *log, sector_t size) +{ + sector_t used_size; + + used_size = r5l_ring_distance(log, log->last_checkpoint, + log->log_start); + + return log->device_size > used_size + size; +} + +static struct r5l_io_unit *r5l_alloc_io_unit(struct r5l_log *log) +{ + struct r5l_io_unit *io; + /* We can't handle memory allocate failure so far */ + gfp_t gfp = GFP_NOIO | __GFP_NOFAIL; + + io = kmem_cache_zalloc(log->io_kc, gfp); + io->log = log; + io->meta_page = alloc_page(gfp | __GFP_ZERO); + + bio_list_init(&io->bios); + INIT_LIST_HEAD(&io->log_sibling); + INIT_LIST_HEAD(&io->stripe_list); + io->state = IO_UNIT_RUNNING; + init_waitqueue_head(&io->wait_state); + return io; +} + +static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io) +{ + __free_page(io->meta_page); + kmem_cache_free(log->io_kc, io); +} + +static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to, + enum r5l_io_unit_state state) +{ + struct r5l_io_unit *io; + + while (!list_empty(from)) { + io = list_first_entry(from, struct r5l_io_unit, log_sibling); + /* don't change list order */ + if (io->state >= state) + list_move_tail(&io->log_sibling, to); + else + break; + } +} + +static void r5l_wake_reclaim(struct r5l_log *log, sector_t space); +static void __r5l_set_io_unit_state(struct r5l_io_unit *io, + enum r5l_io_unit_state state) +{ + struct r5l_log *log = io->log; + + if (WARN_ON(io->state >= state)) + return; + io->state = state; + if (state == IO_UNIT_IO_END) + r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios, + IO_UNIT_IO_END); + wake_up(&io->wait_state); +} + +static void r5l_set_io_unit_state(struct r5l_io_unit *io, + enum r5l_io_unit_state state) +{ + struct r5l_log *log = io->log; + unsigned long flags; + + spin_lock_irqsave(&log->io_list_lock, flags); + __r5l_set_io_unit_state(io, state); + 
spin_unlock_irqrestore(&log->io_list_lock, flags); +} + +/* XXX: totally ignores I/O errors */ +static void r5l_log_endio(struct bio *bio) +{ + struct r5l_io_unit *io = bio->bi_private; + struct r5l_log *log = io->log; + + bio_put(bio); + + if (!atomic_dec_and_test(&io->pending_io)) + return; + + r5l_set_io_unit_state(io, IO_UNIT_IO_END); + md_wakeup_thread(log->rdev->mddev->thread); +} + +static void r5l_submit_current_io(struct r5l_log *log) +{ + struct r5l_io_unit *io = log->current_io; + struct r5l_meta_block *block; + struct bio *bio; + u32 crc; + + if (!io) + return; + + block = page_address(io->meta_page); + block->meta_size = cpu_to_le32(io->meta_offset); + crc = crc32_le(log->uuid_checksum, (void *)block, PAGE_SIZE); + block->checksum = cpu_to_le32(crc); + + log->current_io = NULL; + r5l_set_io_unit_state(io, IO_UNIT_IO_START); + + while ((bio = bio_list_pop(&io->bios))) { + /* all IO must start from rdev->data_offset */ + bio->bi_iter.bi_sector += log->rdev->data_offset; + submit_bio(WRITE, bio); + } +} + +static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) +{ + struct r5l_io_unit *io; + struct r5l_meta_block *block; + struct bio *bio; + + io = r5l_alloc_io_unit(log); + + block = page_address(io->meta_page); + block->magic = cpu_to_le32(R5LOG_MAGIC); + block->version = R5LOG_VERSION; + block->seq = cpu_to_le64(log->seq); + block->position = cpu_to_le64(log->log_start); + + io->log_start = log->log_start; + io->meta_offset = sizeof(struct r5l_meta_block); + io->seq = log->seq; + + bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES); + io->current_bio = bio; + bio->bi_rw = WRITE; + bio->bi_bdev = log->rdev->bdev; + bio->bi_iter.bi_sector = log->log_start; + bio_add_page(bio, io->meta_page, PAGE_SIZE, 0); + bio->bi_end_io = r5l_log_endio; + bio->bi_private = io; + + bio_list_add(&io->bios, bio); + atomic_inc(&io->pending_io); + + log->seq++; + log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS); + io->log_end = log->log_start; + /* current bio hit disk end */ + if (log->log_start == 0) + io->current_bio = NULL; + + spin_lock_irq(&log->io_list_lock); + list_add_tail(&io->log_sibling, &log->running_ios); + spin_unlock_irq(&log->io_list_lock); + + return io; +} + +static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size) +{ + struct r5l_io_unit *io; + + io = log->current_io; + if (io && io->meta_offset + payload_size > PAGE_SIZE) + r5l_submit_current_io(log); + io = log->current_io; + if (io) + return 0; + + log->current_io = r5l_new_meta(log); + return 0; +} + +static void r5l_append_payload_meta(struct r5l_log *log, u16 type, + sector_t location, + u32 checksum1, u32 checksum2, + bool checksum2_valid) +{ + struct r5l_io_unit *io = log->current_io; + struct r5l_payload_data_parity *payload; + + payload = page_address(io->meta_page) + io->meta_offset; + payload->header.type = cpu_to_le16(type); + payload->header.flags = cpu_to_le16(0); + payload->size = cpu_to_le32((1 + !!checksum2_valid) << + (PAGE_SHIFT - 9)); + payload->location = cpu_to_le64(location); + payload->checksum[0] = cpu_to_le32(checksum1); + if (checksum2_valid) + payload->checksum[1] = cpu_to_le32(checksum2); + + io->meta_offset += sizeof(struct r5l_payload_data_parity) + + sizeof(__le32) * (1 + !!checksum2_valid); +} + +static void r5l_append_payload_page(struct r5l_log *log, struct page *page) +{ + struct r5l_io_unit *io = log->current_io; + +alloc_bio: + if (!io->current_bio) { + struct bio *bio; + + bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES); + bio->bi_rw = 
WRITE; + bio->bi_bdev = log->rdev->bdev; + bio->bi_iter.bi_sector = log->log_start; + bio->bi_end_io = r5l_log_endio; + bio->bi_private = io; + bio_list_add(&io->bios, bio); + atomic_inc(&io->pending_io); + io->current_bio = bio; + } + if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) { + io->current_bio = NULL; + goto alloc_bio; + } + log->log_start = r5l_ring_add(log, log->log_start, + BLOCK_SECTORS); + /* current bio hit disk end */ + if (log->log_start == 0) + io->current_bio = NULL; + + io->log_end = log->log_start; +} + +static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, + int data_pages, int parity_pages) +{ + int i; + int meta_size; + struct r5l_io_unit *io; + + meta_size = + ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) + * data_pages) + + sizeof(struct r5l_payload_data_parity) + + sizeof(__le32) * parity_pages; + + r5l_get_meta(log, meta_size); + io = log->current_io; + + for (i = 0; i < sh->disks; i++) { + if (!test_bit(R5_Wantwrite, &sh->dev[i].flags)) + continue; + if (i == sh->pd_idx || i == sh->qd_idx) + continue; + r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA, + raid5_compute_blocknr(sh, i, 0), + sh->dev[i].log_checksum, 0, false); + r5l_append_payload_page(log, sh->dev[i].page); + } + + if (sh->qd_idx >= 0) { + r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, + sh->sector, sh->dev[sh->pd_idx].log_checksum, + sh->dev[sh->qd_idx].log_checksum, true); + r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); + r5l_append_payload_page(log, sh->dev[sh->qd_idx].page); + } else { + r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, + sh->sector, sh->dev[sh->pd_idx].log_checksum, + 0, false); + r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); + } + + list_add_tail(&sh->log_list, &io->stripe_list); + atomic_inc(&io->pending_stripe); + sh->log_io = io; +} + +/* + * running in raid5d, where reclaim could wait for raid5d too (when it flushes + * data from log to raid disks), so we shouldn't wait for reclaim here + */ +int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) +{ + int write_disks = 0; + int data_pages, parity_pages; + int meta_size; + int reserve; + int i; + + if (!log) + return -EAGAIN; + /* Don't support stripe batch */ + if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) || + test_bit(STRIPE_SYNCING, &sh->state)) { + /* the stripe is written to log, we start writing it to raid */ + clear_bit(STRIPE_LOG_TRAPPED, &sh->state); + return -EAGAIN; + } + + for (i = 0; i < sh->disks; i++) { + void *addr; + + if (!test_bit(R5_Wantwrite, &sh->dev[i].flags)) + continue; + write_disks++; + /* checksum is already calculated in last run */ + if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) + continue; + addr = kmap_atomic(sh->dev[i].page); + sh->dev[i].log_checksum = crc32_le(log->uuid_checksum, + addr, PAGE_SIZE); + kunmap_atomic(addr); + } + parity_pages = 1 + !!(sh->qd_idx >= 0); + data_pages = write_disks - parity_pages; + + meta_size = + ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) + * data_pages) + + sizeof(struct r5l_payload_data_parity) + + sizeof(__le32) * parity_pages; + /* Doesn't work with very big raid array */ + if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE) + return -EINVAL; + + set_bit(STRIPE_LOG_TRAPPED, &sh->state); + atomic_inc(&sh->count); + + mutex_lock(&log->io_mutex); + /* meta + data */ + reserve = (1 + write_disks) << (PAGE_SHIFT - 9); + if (r5l_has_free_space(log, reserve)) + r5l_log_stripe(log, sh, data_pages, parity_pages); + else { + 
spin_lock(&log->no_space_stripes_lock); + list_add_tail(&sh->log_list, &log->no_space_stripes); + spin_unlock(&log->no_space_stripes_lock); + + r5l_wake_reclaim(log, reserve); + } + mutex_unlock(&log->io_mutex); + + return 0; +} + +void r5l_write_stripe_run(struct r5l_log *log) +{ + if (!log) + return; + mutex_lock(&log->io_mutex); + r5l_submit_current_io(log); + mutex_unlock(&log->io_mutex); +} + +/* This will run after log space is reclaimed */ +static void r5l_run_no_space_stripes(struct r5l_log *log) +{ + struct stripe_head *sh; + + spin_lock(&log->no_space_stripes_lock); + while (!list_empty(&log->no_space_stripes)) { + sh = list_first_entry(&log->no_space_stripes, + struct stripe_head, log_list); + list_del_init(&sh->log_list); + set_bit(STRIPE_HANDLE, &sh->state); + raid5_release_stripe(sh); + } + spin_unlock(&log->no_space_stripes_lock); +} + +static void r5l_wake_reclaim(struct r5l_log *log, sector_t space) +{ + /* will implement later */ +} + +static int r5l_recovery_log(struct r5l_log *log) +{ + /* fake recovery */ + log->seq = log->last_cp_seq + 1; + log->log_start = r5l_ring_add(log, log->last_checkpoint, BLOCK_SECTORS); + return 0; +} + +static void r5l_write_super(struct r5l_log *log, sector_t cp) +{ + struct mddev *mddev = log->rdev->mddev; + + log->rdev->journal_tail = cp; + set_bit(MD_CHANGE_DEVS, &mddev->flags); +} + +static int r5l_load_log(struct r5l_log *log) +{ + struct md_rdev *rdev = log->rdev; + struct page *page; + struct r5l_meta_block *mb; + sector_t cp = log->rdev->journal_tail; + u32 stored_crc, expected_crc; + bool create_super = false; + int ret; + + /* Make sure it's valid */ + if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp) + cp = 0; + page = alloc_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) { + ret = -EIO; + goto ioerr; + } + mb = page_address(page); + + if (le32_to_cpu(mb->magic) != R5LOG_MAGIC || + mb->version != R5LOG_VERSION) { + create_super = true; + goto create; + } + stored_crc = le32_to_cpu(mb->checksum); + mb->checksum = 0; + expected_crc = crc32_le(log->uuid_checksum, (void *)mb, PAGE_SIZE); + if (stored_crc != expected_crc) { + create_super = true; + goto create; + } + if (le64_to_cpu(mb->position) != cp) { + create_super = true; + goto create; + } +create: + if (create_super) { + log->last_cp_seq = prandom_u32(); + cp = 0; + /* + * Make sure super points to correct address. Log might have + * data very soon. 
If super hasn't correct log tail address, + * recovery can't find the log + */ + r5l_write_super(log, cp); + } else + log->last_cp_seq = le64_to_cpu(mb->seq); + + log->device_size = round_down(rdev->sectors, BLOCK_SECTORS); + log->last_checkpoint = cp; + + __free_page(page); + + return r5l_recovery_log(log); +ioerr: + __free_page(page); + return ret; +} + +int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) +{ + struct r5l_log *log; + + if (PAGE_SIZE != 4096) + return -EINVAL; + log = kzalloc(sizeof(*log), GFP_KERNEL); + if (!log) + return -ENOMEM; + log->rdev = rdev; + + log->uuid_checksum = crc32_le(~0, (void *)rdev->mddev->uuid, + sizeof(rdev->mddev->uuid)); + + mutex_init(&log->io_mutex); + + spin_lock_init(&log->io_list_lock); + INIT_LIST_HEAD(&log->running_ios); + + log->io_kc = KMEM_CACHE(r5l_io_unit, 0); + if (!log->io_kc) + goto io_kc; + + INIT_LIST_HEAD(&log->no_space_stripes); + spin_lock_init(&log->no_space_stripes_lock); + + if (r5l_load_log(log)) + goto error; + + conf->log = log; + return 0; +error: + kmem_cache_destroy(log->io_kc); +io_kc: + kfree(log); + return -EINVAL; +} + +void r5l_exit_log(struct r5l_log *log) +{ + kmem_cache_destroy(log->io_kc); + kfree(log); +} diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4b789f1f4550..64a256538ff7 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -895,6 +895,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) might_sleep(); + if (r5l_write_stripe(conf->log, sh) == 0) + return; for (i = disks; i--; ) { int rw; int replace_only = 0; @@ -3495,6 +3497,7 @@ returnbi: WARN_ON(test_bit(R5_SkipCopy, &dev->flags)); WARN_ON(dev->page != dev->orig_page); } + if (!discard_pending && test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); @@ -5745,6 +5748,7 @@ static int handle_active_stripes(struct r5conf *conf, int group, for (i = 0; i < batch_size; i++) handle_stripe(batch[i]); + r5l_write_stripe_run(conf->log); cond_resched(); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index a42c123d15d2..87fae2b50d87 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -223,6 +223,9 @@ struct stripe_head { struct stripe_head *batch_head; /* protected by stripe lock */ spinlock_t batch_lock; /* only header's lock is useful */ struct list_head batch_list; /* protected by head's batch lock*/ + + struct r5l_io_unit *log_io; + struct list_head log_list; /** * struct stripe_operations * @target - STRIPE_OP_COMPUTE_BLK target @@ -244,6 +247,7 @@ struct stripe_head { struct bio *toread, *read, *towrite, *written; sector_t sector; /* sector of this page */ unsigned long flags; + u32 log_checksum; } dev[1]; /* allocated with extra space depending of RAID geometry */ }; @@ -544,6 +548,7 @@ struct r5conf { struct r5worker_group *worker_groups; int group_cnt; int worker_cnt_per_group; + struct r5l_log *log; }; @@ -618,4 +623,8 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, extern struct stripe_head * raid5_get_active_stripe(struct r5conf *conf, sector_t sector, int previous, int noblock, int noquiesce); +extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev); +extern void r5l_exit_log(struct r5l_log *log); +extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh); +extern void r5l_write_stripe_run(struct r5l_log *log); #endif diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index a5f54ff26c20..96e4196f9c79 100644 --- a/include/uapi/linux/raid/md_p.h +++ 
b/include/uapi/linux/raid/md_p.h @@ -324,4 +324,62 @@ struct mdp_superblock_1 { |MD_FEATURE_CLUSTERED \ ) +struct r5l_payload_header { + __le16 type; + __le16 flags; +} __attribute__ ((__packed__)); + +enum r5l_payload_type { + R5LOG_PAYLOAD_DATA = 0, + R5LOG_PAYLOAD_PARITY = 1, + R5LOG_PAYLOAD_FLUSH = 2, +}; + +struct r5l_payload_data_parity { + struct r5l_payload_header header; + __le32 size; /* sector. data/parity size. each 4k + * has a checksum */ + __le64 location; /* sector. For data, it's raid sector. For + * parity, it's stripe sector */ + __le32 checksum[]; +} __attribute__ ((__packed__)); + +enum r5l_payload_data_parity_flag { + R5LOG_PAYLOAD_FLAG_DISCARD = 1, /* payload is discard */ + /* + * RESHAPED/RESHAPING is only set when there is reshape activity. Note, + * both data/parity of a stripe should have the same flag set + * + * RESHAPED: reshape is running, and this stripe finished reshape + * RESHAPING: reshape is running, and this stripe isn't reshaped + */ + R5LOG_PAYLOAD_FLAG_RESHAPED = 2, + R5LOG_PAYLOAD_FLAG_RESHAPING = 3, +}; + +struct r5l_payload_flush { + struct r5l_payload_header header; + __le32 size; /* flush_stripes size, bytes */ + __le64 flush_stripes[]; +} __attribute__ ((__packed__)); + +enum r5l_payload_flush_flag { + R5LOG_PAYLOAD_FLAG_FLUSH_STRIPE = 1, /* data represents whole stripe */ +}; + +struct r5l_meta_block { + __le32 magic; + __le32 checksum; + __u8 version; + __u8 __zero_pading_1; + __le16 __zero_pading_2; + __le32 meta_size; /* whole size of the block */ + + __le64 seq; + __le64 position; /* sector, start from rdev->data_offset, current position */ + struct r5l_payload_header payloads[]; +} __attribute__ ((__packed__)); + +#define R5LOG_VERSION 0x1 +#define R5LOG_MAGIC 0x6433c509 #endif -- cgit v1.2.3 From 0576b1c618ef220051a8555f2aa7dd316e88f330 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 13 Aug 2015 14:32:00 -0700 Subject: raid5: log reclaim support This is the reclaim support for raid5 log. A stripe write will have following steps: 1. reconstruct the stripe, read data/calculate parity. ops_run_io prepares to write data/parity to raid disks 2. hijack ops_run_io. stripe data/parity is appending to log disk 3. flush log disk cache 4. ops_run_io run again and do normal operation. stripe data/parity is written in raid array disks. raid core can return io to upper layer. 5. flush cache of all raid array disks 6. update super block 7. log disk space used by the stripe can be reused In practice, several stripes consist of an io_unit and we will batch several io_unit in different steps, but the whole process doesn't change. It's possible io return just after data/parity hit log disk, but then read IO will need read from log disk. For simplicity, IO return happens at step 4, where read IO can directly read from raid disks. Currently reclaim run if there is specific reclaimable space (1/4 disk size or 10G) or we are out of space. Reclaim is just to free log disk spaces, it doesn't impact data consistency. The size based force reclaim is to make sure log isn't too big, so recovery doesn't scan log too much. Recovery make sure raid disks and log disk have the same data of a stripe. If crash happens before 4, recovery might/might not recovery stripe's data/parity depending on if data/parity and its checksum matches. In either case, this doesn't change the syntax of an IO write. After step 3, stripe is guaranteed recoverable, because stripe's data/parity is persistent in log disk. 
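The free-space and reclaim accounting described above rests on simple circular-log arithmetic: log_start/log_end wrap at device_size, and reclaimable space is the ring distance from the checkpoint (log tail) forward to the last settled io_unit. A minimal standalone sketch of that arithmetic, mirroring what r5l_ring_add()/r5l_ring_distance() do; the toy_log type and the main() driver are illustrative stand-ins, not the kernel structures:

#include <stdio.h>

typedef unsigned long long sector_t;

struct toy_log {
	sector_t device_size;	/* log size in sectors, a multiple of BLOCK_SECTORS */
};

/* advance a position on the circular log, wrapping at device_size */
static sector_t ring_add(struct toy_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start -= log->device_size;
	return start;
}

/* distance from start to end, walking forward around the ring */
static sector_t ring_distance(struct toy_log *log, sector_t start, sector_t end)
{
	return end >= start ? end - start : end + log->device_size - start;
}

int main(void)
{
	struct toy_log log = { .device_size = 1024 };
	sector_t checkpoint = 1000;				/* log tail */
	sector_t last_end = ring_add(&log, checkpoint, 48);	/* wraps to 24 */

	printf("reclaimable space: %llu sectors\n",
	       ring_distance(&log, checkpoint, last_end));	/* prints 48 */
	return 0;
}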
In some cases, log disk content and raid disks content of a stripe are the same, but recovery will still copy log disk content to raid disks, this doesn't impact data consistency. space reuse happens after superblock update and cache flush. There is one situation we want to avoid. A broken meta in the middle of a log causes recovery can't find meta at the head of log. If operations require meta at the head persistent in log, we must make sure meta before it persistent in log too. The case is stripe data/parity is in log and we start write stripe to raid disks (before step 4). stripe data/parity must be persistent in log before we do the write to raid disks. The solution is we restrictly maintain io_unit list order. In this case, we only write stripes of an io_unit to raid disks till the io_unit is the first one whose data/parity is in log. The io_unit list order is important for other cases too. For example, some io_unit are reclaimable and others not. They can be mixed in the list, we shouldn't reuse space of an unreclaimable io_unit. Includes fixes to problems which were... Reported-by: kbuild test robot Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 260 ++++++++++++++++++++++++++++++++++++++++++++++- drivers/md/raid5.c | 6 ++ drivers/md/raid5.h | 2 + 3 files changed, 267 insertions(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index d18a7700bcb9..a42f522f52e7 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -27,6 +27,13 @@ */ #define BLOCK_SECTORS (8) +/* + * reclaim runs every 1/4 disk size or 10G reclaimable space. This can prevent + * recovery scans a very long log + */ +#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */ +#define RECLAIM_MAX_FREE_SPACE_SHIFT (2) + struct r5l_log { struct md_rdev *rdev; @@ -34,6 +41,8 @@ struct r5l_log { sector_t device_size; /* log device size, round to * BLOCK_SECTORS */ + sector_t max_free_space; /* reclaim run if free space is at + * this size */ sector_t last_checkpoint; /* log tail. where recovery scan * starts from */ @@ -52,9 +61,21 @@ struct r5l_log { struct list_head io_end_ios; /* io_units which have been completely * written to the log but not yet written * to the RAID */ + struct list_head stripe_end_ios;/* io_units which have been completely + * written to the RAID but have not yet + * been considered for updating super */ struct kmem_cache *io_kc; + struct md_thread *reclaim_thread; + unsigned long reclaim_target; /* number of space that need to be + * reclaimed. if it's 0, reclaim spaces + * used by io_units which are in + * IO_UNIT_STRIPE_END state (eg, reclaim + * dones't wait for specific io_unit + * switching to IO_UNIT_STRIPE_END + * state) */ + struct list_head no_space_stripes; /* pending stripes, log has no space */ spinlock_t no_space_stripes_lock; }; @@ -163,6 +184,35 @@ static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to, } } +/* + * We don't want too many io_units reside in stripe_end_ios list, which will + * waste a lot of memory. So we try to remove some. But we must keep at least 2 + * io_units. 
The superblock must point to a valid meta, if it's the last meta, + * recovery can scan less + */ +static void r5l_compress_stripe_end_list(struct r5l_log *log) +{ + struct r5l_io_unit *first, *last, *io; + + first = list_first_entry(&log->stripe_end_ios, + struct r5l_io_unit, log_sibling); + last = list_last_entry(&log->stripe_end_ios, + struct r5l_io_unit, log_sibling); + if (first == last) + return; + list_del(&first->log_sibling); + list_del(&last->log_sibling); + while (!list_empty(&log->stripe_end_ios)) { + io = list_first_entry(&log->stripe_end_ios, + struct r5l_io_unit, log_sibling); + list_del(&io->log_sibling); + first->log_end = io->log_end; + r5l_free_io_unit(log, io); + } + list_add_tail(&first->log_sibling, &log->stripe_end_ios); + list_add_tail(&last->log_sibling, &log->stripe_end_ios); +} + static void r5l_wake_reclaim(struct r5l_log *log, sector_t space); static void __r5l_set_io_unit_state(struct r5l_io_unit *io, enum r5l_io_unit_state state) @@ -175,6 +225,22 @@ static void __r5l_set_io_unit_state(struct r5l_io_unit *io, if (state == IO_UNIT_IO_END) r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios, IO_UNIT_IO_END); + if (state == IO_UNIT_STRIPE_END) { + struct r5l_io_unit *last; + sector_t reclaimable_space; + + r5l_move_io_unit_list(&log->io_end_ios, &log->stripe_end_ios, + IO_UNIT_STRIPE_END); + + last = list_last_entry(&log->stripe_end_ios, + struct r5l_io_unit, log_sibling); + reclaimable_space = r5l_ring_distance(log, log->last_checkpoint, + last->log_end); + if (reclaimable_space >= log->max_free_space) + r5l_wake_reclaim(log, 0); + + r5l_compress_stripe_end_list(log); + } wake_up(&io->wait_state); } @@ -479,9 +545,176 @@ static void r5l_run_no_space_stripes(struct r5l_log *log) spin_unlock(&log->no_space_stripes_lock); } +void r5l_stripe_write_finished(struct stripe_head *sh) +{ + struct r5l_io_unit *io; + + /* Don't support stripe batch */ + io = sh->log_io; + if (!io) + return; + sh->log_io = NULL; + + if (atomic_dec_and_test(&io->pending_stripe)) + r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END); +} + +/* + * Starting dispatch IO to raid. + * io_unit(meta) consists of a log. There is one situation we want to avoid. A + * broken meta in the middle of a log causes recovery can't find meta at the + * head of log. If operations require meta at the head persistent in log, we + * must make sure meta before it persistent in log too. A case is: + * + * stripe data/parity is in log, we start write stripe to raid disks. stripe + * data/parity must be persistent in log before we do the write to raid disks. + * + * The solution is we restrictly maintain io_unit list order. In this case, we + * only write stripes of an io_unit to raid disks till the io_unit is the first + * one whose data/parity is in log. 
+ */ +void r5l_flush_stripe_to_raid(struct r5l_log *log) +{ + struct r5l_io_unit *io; + struct stripe_head *sh; + bool run_stripe; + + if (!log) + return; + spin_lock_irq(&log->io_list_lock); + run_stripe = !list_empty(&log->io_end_ios); + spin_unlock_irq(&log->io_list_lock); + + if (!run_stripe) + return; + + blkdev_issue_flush(log->rdev->bdev, GFP_NOIO, NULL); + + spin_lock_irq(&log->io_list_lock); + list_for_each_entry(io, &log->io_end_ios, log_sibling) { + if (io->state >= IO_UNIT_STRIPE_START) + continue; + __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_START); + + while (!list_empty(&io->stripe_list)) { + sh = list_first_entry(&io->stripe_list, + struct stripe_head, log_list); + list_del_init(&sh->log_list); + set_bit(STRIPE_HANDLE, &sh->state); + raid5_release_stripe(sh); + } + } + spin_unlock_irq(&log->io_list_lock); +} + +static void r5l_kick_io_unit(struct r5l_log *log, struct r5l_io_unit *io) +{ + /* the log thread will write the io unit */ + wait_event(io->wait_state, io->state >= IO_UNIT_IO_END); + if (io->state < IO_UNIT_STRIPE_START) + r5l_flush_stripe_to_raid(log); + wait_event(io->wait_state, io->state >= IO_UNIT_STRIPE_END); +} + +static void r5l_write_super(struct r5l_log *log, sector_t cp); +static void r5l_do_reclaim(struct r5l_log *log) +{ + struct r5l_io_unit *io, *last; + LIST_HEAD(list); + sector_t free = 0; + sector_t reclaim_target = xchg(&log->reclaim_target, 0); + + spin_lock_irq(&log->io_list_lock); + /* + * move proper io_unit to reclaim list. We should not change the order. + * reclaimable/unreclaimable io_unit can be mixed in the list, we + * shouldn't reuse space of an unreclaimable io_unit + */ + while (1) { + while (!list_empty(&log->stripe_end_ios)) { + io = list_first_entry(&log->stripe_end_ios, + struct r5l_io_unit, log_sibling); + list_move_tail(&io->log_sibling, &list); + free += r5l_ring_distance(log, io->log_start, + io->log_end); + } + + if (free >= reclaim_target || + (list_empty(&log->running_ios) && + list_empty(&log->io_end_ios) && + list_empty(&log->stripe_end_ios))) + break; + + /* Below waiting mostly happens when we shutdown the raid */ + if (!list_empty(&log->io_end_ios)) { + io = list_first_entry(&log->io_end_ios, + struct r5l_io_unit, log_sibling); + spin_unlock_irq(&log->io_list_lock); + /* nobody else can delete the io, we are safe */ + r5l_kick_io_unit(log, io); + spin_lock_irq(&log->io_list_lock); + continue; + } + + if (!list_empty(&log->running_ios)) { + io = list_first_entry(&log->running_ios, + struct r5l_io_unit, log_sibling); + spin_unlock_irq(&log->io_list_lock); + /* nobody else can delete the io, we are safe */ + r5l_kick_io_unit(log, io); + spin_lock_irq(&log->io_list_lock); + continue; + } + } + spin_unlock_irq(&log->io_list_lock); + + if (list_empty(&list)) + return; + + /* super always point to last valid meta */ + last = list_last_entry(&list, struct r5l_io_unit, log_sibling); + /* + * write_super will flush cache of each raid disk. 
We must write super + * here, because the log area might be reused soon and we don't want to + * confuse recovery + */ + r5l_write_super(log, last->log_start); + + mutex_lock(&log->io_mutex); + log->last_checkpoint = last->log_start; + log->last_cp_seq = last->seq; + mutex_unlock(&log->io_mutex); + r5l_run_no_space_stripes(log); + + while (!list_empty(&list)) { + io = list_first_entry(&list, struct r5l_io_unit, log_sibling); + list_del(&io->log_sibling); + r5l_free_io_unit(log, io); + } +} + +static void r5l_reclaim_thread(struct md_thread *thread) +{ + struct mddev *mddev = thread->mddev; + struct r5conf *conf = mddev->private; + struct r5l_log *log = conf->log; + + if (!log) + return; + r5l_do_reclaim(log); +} + static void r5l_wake_reclaim(struct r5l_log *log, sector_t space) { - /* will implement later */ + unsigned long target; + unsigned long new = (unsigned long)space; /* overflow in theory */ + + do { + target = log->reclaim_target; + if (new < target) + return; + } while (cmpxchg(&log->reclaim_target, target, new) != target); + md_wakeup_thread(log->reclaim_thread); } static int r5l_recovery_log(struct r5l_log *log) @@ -553,6 +786,9 @@ create: log->last_cp_seq = le64_to_cpu(mb->seq); log->device_size = round_down(rdev->sectors, BLOCK_SECTORS); + log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT; + if (log->max_free_space > RECLAIM_MAX_FREE_SPACE) + log->max_free_space = RECLAIM_MAX_FREE_SPACE; log->last_checkpoint = cp; __free_page(page); @@ -581,11 +817,18 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) spin_lock_init(&log->io_list_lock); INIT_LIST_HEAD(&log->running_ios); + INIT_LIST_HEAD(&log->io_end_ios); + INIT_LIST_HEAD(&log->stripe_end_ios); log->io_kc = KMEM_CACHE(r5l_io_unit, 0); if (!log->io_kc) goto io_kc; + log->reclaim_thread = md_register_thread(r5l_reclaim_thread, + log->rdev->mddev, "reclaim"); + if (!log->reclaim_thread) + goto reclaim_thread; + INIT_LIST_HEAD(&log->no_space_stripes); spin_lock_init(&log->no_space_stripes_lock); @@ -595,6 +838,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) conf->log = log; return 0; error: + md_unregister_thread(&log->reclaim_thread); +reclaim_thread: kmem_cache_destroy(log->io_kc); io_kc: kfree(log); @@ -603,6 +848,19 @@ io_kc: void r5l_exit_log(struct r5l_log *log) { + /* + * at this point all stripes are finished, so io_unit is at least in + * STRIPE_END state + */ + r5l_wake_reclaim(log, -1L); + md_unregister_thread(&log->reclaim_thread); + r5l_do_reclaim(log); + /* + * force a super update, r5l_do_reclaim might updated the super. 
+ * mddev->thread is already stopped + */ + md_update_sb(log->rdev->mddev, 1); + kmem_cache_destroy(log->io_kc); kfree(log); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 64a256538ff7..508a29bd4733 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3098,6 +3098,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, if (bi) bitmap_end = 1; + r5l_stripe_write_finished(sh); + if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) wake_up(&conf->wait_for_overlap); @@ -3498,6 +3500,8 @@ returnbi: WARN_ON(dev->page != dev->orig_page); } + r5l_stripe_write_finished(sh); + if (!discard_pending && test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); @@ -5882,6 +5886,8 @@ static void raid5d(struct md_thread *thread) mutex_unlock(&conf->cache_size_mutex); } + r5l_flush_stripe_to_raid(conf->log); + async_tx_issue_pending_all(); blk_finish_plug(&plug); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 87fae2b50d87..1f16d437bfda 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -627,4 +627,6 @@ extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev); extern void r5l_exit_log(struct r5l_log *log); extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh); extern void r5l_write_stripe_run(struct r5l_log *log); +extern void r5l_flush_stripe_to_raid(struct r5l_log *log); +extern void r5l_stripe_write_finished(struct stripe_head *sh); #endif -- cgit v1.2.3 From 355810d12a8974ff1f3a7336149b65d4bda84634 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 13 Aug 2015 14:32:01 -0700 Subject: raid5: log recovery This is the log recovery support. The process is quite straightforward. We scan the log and read all valid meta/data/parity into memory. If a stripe's data/parity checksum is correct, the stripe will be recoveried. Otherwise, it's discarded and we don't scan the log further. The reclaim process guarantees stripe which starts to be flushed raid disks has completed data/parity and has correct checksum. To recovery a stripe, we just copy its data/parity to corresponding raid disks. The trick thing is superblock update after recovery. we can't let superblock point to last valid meta block. The log might look like: | meta 1| meta 2| meta 3| meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If superblock points to meta 1, we write a new valid meta 2n. If crash happens again, new recovery will start from meta 1. Since meta 2n is valid, recovery will think meta 3 is valid, which is wrong. The solution is we create a new meta in meta2 with its seq == meta 1's seq + 10 and let superblock points to meta2. 
recovery will not think meta 3 is a valid meta, because its seq is wrong Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 243 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 240 insertions(+), 3 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index a42f522f52e7..2b9ed0e3af37 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -717,11 +717,248 @@ static void r5l_wake_reclaim(struct r5l_log *log, sector_t space) md_wakeup_thread(log->reclaim_thread); } +struct r5l_recovery_ctx { + struct page *meta_page; /* current meta */ + sector_t meta_total_blocks; /* total size of current meta and data */ + sector_t pos; /* recovery position */ + u64 seq; /* recovery position seq */ +}; + +static int r5l_read_meta_block(struct r5l_log *log, + struct r5l_recovery_ctx *ctx) +{ + struct page *page = ctx->meta_page; + struct r5l_meta_block *mb; + u32 crc, stored_crc; + + if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false)) + return -EIO; + + mb = page_address(page); + stored_crc = le32_to_cpu(mb->checksum); + mb->checksum = 0; + + if (le32_to_cpu(mb->magic) != R5LOG_MAGIC || + le64_to_cpu(mb->seq) != ctx->seq || + mb->version != R5LOG_VERSION || + le64_to_cpu(mb->position) != ctx->pos) + return -EINVAL; + + crc = crc32_le(log->uuid_checksum, (void *)mb, PAGE_SIZE); + if (stored_crc != crc) + return -EINVAL; + + if (le32_to_cpu(mb->meta_size) > PAGE_SIZE) + return -EINVAL; + + ctx->meta_total_blocks = BLOCK_SECTORS; + + return 0; +} + +static int r5l_recovery_flush_one_stripe(struct r5l_log *log, + struct r5l_recovery_ctx *ctx, + sector_t stripe_sect, + int *offset, sector_t *log_offset) +{ + struct r5conf *conf = log->rdev->mddev->private; + struct stripe_head *sh; + struct r5l_payload_data_parity *payload; + int disk_index; + + sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0); + while (1) { + payload = page_address(ctx->meta_page) + *offset; + + if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) { + raid5_compute_sector(conf, + le64_to_cpu(payload->location), 0, + &disk_index, sh); + + sync_page_io(log->rdev, *log_offset, PAGE_SIZE, + sh->dev[disk_index].page, READ, false); + sh->dev[disk_index].log_checksum = + le32_to_cpu(payload->checksum[0]); + set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); + ctx->meta_total_blocks += BLOCK_SECTORS; + } else { + disk_index = sh->pd_idx; + sync_page_io(log->rdev, *log_offset, PAGE_SIZE, + sh->dev[disk_index].page, READ, false); + sh->dev[disk_index].log_checksum = + le32_to_cpu(payload->checksum[0]); + set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); + + if (sh->qd_idx >= 0) { + disk_index = sh->qd_idx; + sync_page_io(log->rdev, + r5l_ring_add(log, *log_offset, BLOCK_SECTORS), + PAGE_SIZE, sh->dev[disk_index].page, + READ, false); + sh->dev[disk_index].log_checksum = + le32_to_cpu(payload->checksum[1]); + set_bit(R5_Wantwrite, + &sh->dev[disk_index].flags); + } + ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded; + } + + *log_offset = r5l_ring_add(log, *log_offset, + le32_to_cpu(payload->size)); + *offset += sizeof(struct r5l_payload_data_parity) + + sizeof(__le32) * + (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); + if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) + break; + } + + for (disk_index = 0; disk_index < sh->disks; disk_index++) { + void *addr; + u32 checksum; + + if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) + continue; + addr = 
kmap_atomic(sh->dev[disk_index].page); + checksum = crc32_le(log->uuid_checksum, addr, PAGE_SIZE); + kunmap_atomic(addr); + if (checksum != sh->dev[disk_index].log_checksum) + goto error; + } + + for (disk_index = 0; disk_index < sh->disks; disk_index++) { + struct md_rdev *rdev, *rrdev; + + if (!test_and_clear_bit(R5_Wantwrite, + &sh->dev[disk_index].flags)) + continue; + + /* in case device is broken */ + rdev = rcu_dereference(conf->disks[disk_index].rdev); + if (rdev) + sync_page_io(rdev, stripe_sect, PAGE_SIZE, + sh->dev[disk_index].page, WRITE, false); + rrdev = rcu_dereference(conf->disks[disk_index].replacement); + if (rrdev) + sync_page_io(rrdev, stripe_sect, PAGE_SIZE, + sh->dev[disk_index].page, WRITE, false); + } + raid5_release_stripe(sh); + return 0; + +error: + for (disk_index = 0; disk_index < sh->disks; disk_index++) + sh->dev[disk_index].flags = 0; + raid5_release_stripe(sh); + return -EINVAL; +} + +static int r5l_recovery_flush_one_meta(struct r5l_log *log, + struct r5l_recovery_ctx *ctx) +{ + struct r5conf *conf = log->rdev->mddev->private; + struct r5l_payload_data_parity *payload; + struct r5l_meta_block *mb; + int offset; + sector_t log_offset; + sector_t stripe_sector; + + mb = page_address(ctx->meta_page); + offset = sizeof(struct r5l_meta_block); + log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); + + while (offset < le32_to_cpu(mb->meta_size)) { + int dd; + + payload = (void *)mb + offset; + stripe_sector = raid5_compute_sector(conf, + le64_to_cpu(payload->location), 0, &dd, NULL); + if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector, + &offset, &log_offset)) + return -EINVAL; + } + return 0; +} + +/* copy data/parity from log to raid disks */ +static void r5l_recovery_flush_log(struct r5l_log *log, + struct r5l_recovery_ctx *ctx) +{ + while (1) { + if (r5l_read_meta_block(log, ctx)) + return; + if (r5l_recovery_flush_one_meta(log, ctx)) + return; + ctx->seq++; + ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks); + } +} + +static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, + u64 seq) +{ + struct page *page; + struct r5l_meta_block *mb; + u32 crc; + + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + return -ENOMEM; + mb = page_address(page); + mb->magic = cpu_to_le32(R5LOG_MAGIC); + mb->version = R5LOG_VERSION; + mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block)); + mb->seq = cpu_to_le64(seq); + mb->position = cpu_to_le64(pos); + crc = crc32_le(log->uuid_checksum, (void *)mb, PAGE_SIZE); + mb->checksum = cpu_to_le32(crc); + + if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) { + __free_page(page); + return -EIO; + } + __free_page(page); + return 0; +} + static int r5l_recovery_log(struct r5l_log *log) { - /* fake recovery */ - log->seq = log->last_cp_seq + 1; - log->log_start = r5l_ring_add(log, log->last_checkpoint, BLOCK_SECTORS); + struct r5l_recovery_ctx ctx; + + ctx.pos = log->last_checkpoint; + ctx.seq = log->last_cp_seq; + ctx.meta_page = alloc_page(GFP_KERNEL); + if (!ctx.meta_page) + return -ENOMEM; + + r5l_recovery_flush_log(log, &ctx); + __free_page(ctx.meta_page); + + /* + * we did a recovery. Now ctx.pos points to an invalid meta block. New + * log will start here. but we can't let superblock point to last valid + * meta block. The log might looks like: + * | meta 1| meta 2| meta 3| + * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If + * superblock points to meta 1, we write a new valid meta 2n. 
if crash + * happens again, new recovery will start from meta 1. Since meta 2n is + * valid now, recovery will think meta 3 is valid, which is wrong. + * The solution is we create a new meta in meta2 with its seq == meta + * 1's seq + 10 and let superblock points to meta2. The same recovery will + * not think meta 3 is a valid meta, because its seq doesn't match + */ + if (ctx.seq > log->last_cp_seq + 1) { + int ret; + + ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10); + if (ret) + return ret; + log->seq = ctx.seq + 11; + log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); + r5l_write_super(log, ctx.pos); + } else { + log->log_start = ctx.pos; + log->seq = ctx.seq; + } return 0; } -- cgit v1.2.3 From 5cb2fbd6ea0d151dcb12d98c06c8761eedfed2ee Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 28 Oct 2015 08:41:25 -0700 Subject: raid5-cache: use crc32c checksum crc32c has lower overhead with cpu acceleration. It's a shame I didn't use it in first post, sorry. This changes disk format, but we are still ok in current stage. V2: delete unnecessary type conversion as pointed out by Bart Signed-off-by: Shaohua Li Signed-off-by: NeilBrown Reviewed-by: Bart Van Assche --- drivers/md/raid5-cache.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 2b9ed0e3af37..270ee3aaba23 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include "md.h" #include "raid5.h" @@ -282,7 +282,7 @@ static void r5l_submit_current_io(struct r5l_log *log) block = page_address(io->meta_page); block->meta_size = cpu_to_le32(io->meta_offset); - crc = crc32_le(log->uuid_checksum, (void *)block, PAGE_SIZE); + crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE); block->checksum = cpu_to_le32(crc); log->current_io = NULL; @@ -484,8 +484,8 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) continue; addr = kmap_atomic(sh->dev[i].page); - sh->dev[i].log_checksum = crc32_le(log->uuid_checksum, - addr, PAGE_SIZE); + sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, + addr, PAGE_SIZE); kunmap_atomic(addr); } parity_pages = 1 + !!(sh->qd_idx >= 0); @@ -744,7 +744,7 @@ static int r5l_read_meta_block(struct r5l_log *log, le64_to_cpu(mb->position) != ctx->pos) return -EINVAL; - crc = crc32_le(log->uuid_checksum, (void *)mb, PAGE_SIZE); + crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); if (stored_crc != crc) return -EINVAL; @@ -819,7 +819,7 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) continue; addr = kmap_atomic(sh->dev[disk_index].page); - checksum = crc32_le(log->uuid_checksum, addr, PAGE_SIZE); + checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE); kunmap_atomic(addr); if (checksum != sh->dev[disk_index].log_checksum) goto error; @@ -909,7 +909,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block)); mb->seq = cpu_to_le64(seq); mb->position = cpu_to_le64(pos); - crc = crc32_le(log->uuid_checksum, (void *)mb, PAGE_SIZE); + crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); mb->checksum = cpu_to_le32(crc); if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) { @@ -1000,7 +1000,7 @@ static int r5l_load_log(struct r5l_log *log) } stored_crc = 
le32_to_cpu(mb->checksum); mb->checksum = 0; - expected_crc = crc32_le(log->uuid_checksum, (void *)mb, PAGE_SIZE); + expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); if (stored_crc != expected_crc) { create_super = true; goto create; @@ -1047,8 +1047,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) return -ENOMEM; log->rdev = rdev; - log->uuid_checksum = crc32_le(~0, (void *)rdev->mddev->uuid, - sizeof(rdev->mddev->uuid)); + log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid, + sizeof(rdev->mddev->uuid)); mutex_init(&log->io_mutex); -- cgit v1.2.3 From 9c3e333d3f8b01407c8e9f78958e28a8594a0827 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 13 Aug 2015 14:32:02 -0700 Subject: raid5: disable batch with log enabled With log enabled, r5l_write_stripe will add the stripe to log. With batch, several stripes are linked together. The stripes must be in the same state. While with log, the log/reclaim unit is stripe, we can't guarantee the several stripes are in the same state. Disabling batch for log now. Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 508a29bd4733..a9604d4392ee 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -755,6 +755,10 @@ static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) /* Only freshly new full stripe normal write stripe can be added to a batch list */ static bool stripe_can_batch(struct stripe_head *sh) { + struct r5conf *conf = sh->raid_conf; + + if (conf->log) + return false; return test_bit(STRIPE_BATCH_READY, &sh->state) && !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && is_full_stripe_write(sh); -- cgit v1.2.3 From 713cf5a63954bdc1cd4bed0a81e98cbd8fc5928c Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 13 Aug 2015 14:32:03 -0700 Subject: raid5: don't allow resize/reshape with cache(log) support If cache(log) support is enabled, don't allow resize/reshape in current stage. In the future, we can flush all data from cache(log) to raid before resize/reshape and then allow resize/reshape. 
Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a9604d4392ee..122702d957a6 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6663,6 +6663,7 @@ static int run(struct mddev *mddev) int working_disks = 0; int dirty_parity_disks = 0; struct md_rdev *rdev; + struct md_rdev *journal_dev = NULL; sector_t reshape_offset = 0; int i; long long min_offset_diff = 0; @@ -6675,6 +6676,9 @@ static int run(struct mddev *mddev) rdev_for_each(rdev, mddev) { long long diff; + + if (test_bit(Journal, &rdev->flags)) + journal_dev = rdev; if (rdev->raid_disk < 0) continue; diff = (rdev->new_data_offset - rdev->data_offset); @@ -6708,6 +6712,12 @@ static int run(struct mddev *mddev) int chunk_sectors; int new_data_disks; + if (journal_dev) { + printk(KERN_ERR "md/raid:%s: don't support reshape with journal - aborting.\n", + mdname(mddev)); + return -EINVAL; + } + if (mddev->new_level != mddev->level) { printk(KERN_ERR "md/raid:%s: unsupported reshape " "required - aborting.\n", @@ -7218,6 +7228,8 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors) sector_t newsize; struct r5conf *conf = mddev->private; + if (conf->log) + return -EINVAL; sectors &= ~((sector_t)conf->chunk_sectors - 1); newsize = raid5_size(mddev, sectors, mddev->raid_disks); if (mddev->external_size && @@ -7269,6 +7281,8 @@ static int check_reshape(struct mddev *mddev) { struct r5conf *conf = mddev->private; + if (conf->log) + return -EINVAL; if (mddev->delta_disks == 0 && mddev->new_layout == mddev->layout && mddev->new_chunk_sectors == mddev->chunk_sectors) -- cgit v1.2.3 From 5c7e81c3de9eb3db01e16190a1da0899efee645b Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 13 Aug 2015 14:32:04 -0700 Subject: raid5: enable log for raid array with cache disk Now log is safe to enable for raid array with cache disk Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5.c | 11 +++++++++++ include/uapi/linux/raid/md_p.h | 1 + 2 files changed, 12 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 122702d957a6..b0bf81d084fd 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6329,8 +6329,11 @@ static void raid5_free_percpu(struct r5conf *conf) static void free_conf(struct r5conf *conf) { + if (conf->log) + r5l_exit_log(conf->log); if (conf->shrinker.seeks) unregister_shrinker(&conf->shrinker); + free_thread_groups(conf); shrink_stripes(conf); raid5_free_percpu(conf); @@ -6996,6 +6999,14 @@ static int run(struct mddev *mddev) mddev->queue); } + if (journal_dev) { + char b[BDEVNAME_SIZE]; + + printk(KERN_INFO"md/raid:%s: using device %s as journal\n", + mdname(mddev), bdevname(journal_dev->bdev, b)); + r5l_init_log(conf, journal_dev); + } + return 0; abort: md_unregister_thread(&mddev->thread); diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index 96e4196f9c79..c3e654c6d518 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -322,6 +322,7 @@ struct mdp_superblock_1 { |MD_FEATURE_NEW_OFFSET \ |MD_FEATURE_RECOVERY_BITMAP \ |MD_FEATURE_CLUSTERED \ + |MD_FEATURE_JOURNAL \ ) struct r5l_payload_header { -- cgit v1.2.3 From a8c34f915976e3de044cc31b8bcb46f816f5a52e Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 2 Sep 2015 13:49:46 -0700 Subject: raid5-cache: switching to state machine for log disk cache flush Before we write stripe data to 
raid disks, we must guarantee stripe data is settled down in log disk. To do this, we flush log disk cache and wait the flush finish. That wait introduces sleep time in raid5d thread and impact performance. This patch moves the log disk cache flush process to the stripe handling state machine, which can remove the wait in raid5d. Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 116 ++++++++++++++++++++++++++--------------------- drivers/md/raid5.c | 7 ++- 2 files changed, 71 insertions(+), 52 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 270ee3aaba23..41542ebd813b 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -61,6 +61,10 @@ struct r5l_log { struct list_head io_end_ios; /* io_units which have been completely * written to the log but not yet written * to the RAID */ + struct list_head flushing_ios; /* io_units which are waiting for log + * cache flush */ + struct list_head flushed_ios; /* io_units which settle down in log disk */ + struct bio flush_bio; struct list_head stripe_end_ios;/* io_units which have been completely * written to the RAID but have not yet * been considered for updating super */ @@ -114,8 +118,7 @@ enum r5l_io_unit_state { IO_UNIT_IO_START = 1, /* io_unit bio start writing to log, * don't accepting new bio */ IO_UNIT_IO_END = 2, /* io_unit bio finish writing to log */ - IO_UNIT_STRIPE_START = 3, /* stripes of io_unit are flushing to raid */ - IO_UNIT_STRIPE_END = 4, /* stripes data finished writing to raid */ + IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */ }; static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc) @@ -229,7 +232,7 @@ static void __r5l_set_io_unit_state(struct r5l_io_unit *io, struct r5l_io_unit *last; sector_t reclaimable_space; - r5l_move_io_unit_list(&log->io_end_ios, &log->stripe_end_ios, + r5l_move_io_unit_list(&log->flushed_ios, &log->stripe_end_ios, IO_UNIT_STRIPE_END); last = list_last_entry(&log->stripe_end_ios, @@ -559,6 +562,28 @@ void r5l_stripe_write_finished(struct stripe_head *sh) r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END); } +static void r5l_log_flush_endio(struct bio *bio) +{ + struct r5l_log *log = container_of(bio, struct r5l_log, + flush_bio); + unsigned long flags; + struct r5l_io_unit *io; + struct stripe_head *sh; + + spin_lock_irqsave(&log->io_list_lock, flags); + list_for_each_entry(io, &log->flushing_ios, log_sibling) { + while (!list_empty(&io->stripe_list)) { + sh = list_first_entry(&io->stripe_list, + struct stripe_head, log_list); + list_del_init(&sh->log_list); + set_bit(STRIPE_HANDLE, &sh->state); + raid5_release_stripe(sh); + } + } + list_splice_tail_init(&log->flushing_ios, &log->flushed_ios); + spin_unlock_irqrestore(&log->io_list_lock, flags); +} + /* * Starting dispatch IO to raid. * io_unit(meta) consists of a log. There is one situation we want to avoid. 
A @@ -575,44 +600,31 @@ void r5l_stripe_write_finished(struct stripe_head *sh) */ void r5l_flush_stripe_to_raid(struct r5l_log *log) { - struct r5l_io_unit *io; - struct stripe_head *sh; - bool run_stripe; - + bool do_flush; if (!log) return; - spin_lock_irq(&log->io_list_lock); - run_stripe = !list_empty(&log->io_end_ios); - spin_unlock_irq(&log->io_list_lock); - - if (!run_stripe) - return; - - blkdev_issue_flush(log->rdev->bdev, GFP_NOIO, NULL); spin_lock_irq(&log->io_list_lock); - list_for_each_entry(io, &log->io_end_ios, log_sibling) { - if (io->state >= IO_UNIT_STRIPE_START) - continue; - __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_START); - - while (!list_empty(&io->stripe_list)) { - sh = list_first_entry(&io->stripe_list, - struct stripe_head, log_list); - list_del_init(&sh->log_list); - set_bit(STRIPE_HANDLE, &sh->state); - raid5_release_stripe(sh); - } + /* flush bio is running */ + if (!list_empty(&log->flushing_ios)) { + spin_unlock_irq(&log->io_list_lock); + return; } + list_splice_tail_init(&log->io_end_ios, &log->flushing_ios); + do_flush = !list_empty(&log->flushing_ios); spin_unlock_irq(&log->io_list_lock); + + if (!do_flush) + return; + bio_reset(&log->flush_bio); + log->flush_bio.bi_bdev = log->rdev->bdev; + log->flush_bio.bi_end_io = r5l_log_flush_endio; + submit_bio(WRITE_FLUSH, &log->flush_bio); } static void r5l_kick_io_unit(struct r5l_log *log, struct r5l_io_unit *io) { - /* the log thread will write the io unit */ - wait_event(io->wait_state, io->state >= IO_UNIT_IO_END); - if (io->state < IO_UNIT_STRIPE_START) - r5l_flush_stripe_to_raid(log); + md_wakeup_thread(log->rdev->mddev->thread); wait_event(io->wait_state, io->state >= IO_UNIT_STRIPE_END); } @@ -631,6 +643,8 @@ static void r5l_do_reclaim(struct r5l_log *log) * shouldn't reuse space of an unreclaimable io_unit */ while (1) { + struct list_head *target_list = NULL; + while (!list_empty(&log->stripe_end_ios)) { io = list_first_entry(&log->stripe_end_ios, struct r5l_io_unit, log_sibling); @@ -642,29 +656,26 @@ static void r5l_do_reclaim(struct r5l_log *log) if (free >= reclaim_target || (list_empty(&log->running_ios) && list_empty(&log->io_end_ios) && - list_empty(&log->stripe_end_ios))) + list_empty(&log->flushing_ios) && + list_empty(&log->flushed_ios))) break; /* Below waiting mostly happens when we shutdown the raid */ - if (!list_empty(&log->io_end_ios)) { - io = list_first_entry(&log->io_end_ios, - struct r5l_io_unit, log_sibling); - spin_unlock_irq(&log->io_list_lock); - /* nobody else can delete the io, we are safe */ - r5l_kick_io_unit(log, io); - spin_lock_irq(&log->io_list_lock); - continue; - } - - if (!list_empty(&log->running_ios)) { - io = list_first_entry(&log->running_ios, - struct r5l_io_unit, log_sibling); - spin_unlock_irq(&log->io_list_lock); - /* nobody else can delete the io, we are safe */ - r5l_kick_io_unit(log, io); - spin_lock_irq(&log->io_list_lock); - continue; - } + if (!list_empty(&log->flushed_ios)) + target_list = &log->flushed_ios; + else if (!list_empty(&log->flushing_ios)) + target_list = &log->flushing_ios; + else if (!list_empty(&log->io_end_ios)) + target_list = &log->io_end_ios; + else if (!list_empty(&log->running_ios)) + target_list = &log->running_ios; + + io = list_first_entry(target_list, + struct r5l_io_unit, log_sibling); + spin_unlock_irq(&log->io_list_lock); + /* nobody else can delete the io, we are safe */ + r5l_kick_io_unit(log, io); + spin_lock_irq(&log->io_list_lock); } spin_unlock_irq(&log->io_list_lock); @@ -1056,6 +1067,9 @@ int r5l_init_log(struct r5conf 
*conf, struct md_rdev *rdev) INIT_LIST_HEAD(&log->running_ios); INIT_LIST_HEAD(&log->io_end_ios); INIT_LIST_HEAD(&log->stripe_end_ios); + INIT_LIST_HEAD(&log->flushing_ios); + INIT_LIST_HEAD(&log->flushed_ios); + bio_init(&log->flush_bio); log->io_kc = KMEM_CACHE(r5l_io_unit, 0); if (!log->io_kc) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index b0bf81d084fd..46042c7c25a5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5740,8 +5740,12 @@ static int handle_active_stripes(struct r5conf *conf, int group, for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) if (!list_empty(temp_inactive_list + i)) break; - if (i == NR_STRIPE_HASH_LOCKS) + if (i == NR_STRIPE_HASH_LOCKS) { + spin_unlock_irq(&conf->device_lock); + r5l_flush_stripe_to_raid(conf->log); + spin_lock_irq(&conf->device_lock); return batch_size; + } release_inactive = true; } spin_unlock_irq(&conf->device_lock); @@ -5749,6 +5753,7 @@ static int handle_active_stripes(struct r5conf *conf, int group, release_inactive_stripe_list(conf, temp_inactive_list, NR_STRIPE_HASH_LOCKS); + r5l_flush_stripe_to_raid(conf->log); if (release_inactive) { spin_lock_irq(&conf->device_lock); return 0; -- cgit v1.2.3 From 0fd22b45b2d2e1de162d6a47327d449e0ed45ad2 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 2 Sep 2015 13:49:47 -0700 Subject: raid5-cache: fix a user-after-free bug r5l_compress_stripe_end_list() can free an io_unit. This breaks the assumption only reclaimer can free io_unit. We can add a reference count based io_unit free, but since only reclaim can wait io_unit becoming to STRIPE_END state, we use a simple global wait queue here. Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 41542ebd813b..496f8e2578cc 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -79,6 +79,7 @@ struct r5l_log { * dones't wait for specific io_unit * switching to IO_UNIT_STRIPE_END * state) */ + wait_queue_head_t iounit_wait; struct list_head no_space_stripes; /* pending stripes, log has no space */ spinlock_t no_space_stripes_lock; @@ -109,7 +110,6 @@ struct r5l_io_unit { struct list_head stripe_list; /* stripes added to the io_unit */ int state; - wait_queue_head_t wait_state; }; /* r5l_io_unit state */ @@ -162,7 +162,6 @@ static struct r5l_io_unit *r5l_alloc_io_unit(struct r5l_log *log) INIT_LIST_HEAD(&io->log_sibling); INIT_LIST_HEAD(&io->stripe_list); io->state = IO_UNIT_RUNNING; - init_waitqueue_head(&io->wait_state); return io; } @@ -243,8 +242,8 @@ static void __r5l_set_io_unit_state(struct r5l_io_unit *io, r5l_wake_reclaim(log, 0); r5l_compress_stripe_end_list(log); + wake_up(&log->iounit_wait); } - wake_up(&io->wait_state); } static void r5l_set_io_unit_state(struct r5l_io_unit *io, @@ -622,10 +621,11 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log) submit_bio(WRITE_FLUSH, &log->flush_bio); } -static void r5l_kick_io_unit(struct r5l_log *log, struct r5l_io_unit *io) +static void r5l_kick_io_unit(struct r5l_log *log) { md_wakeup_thread(log->rdev->mddev->thread); - wait_event(io->wait_state, io->state >= IO_UNIT_STRIPE_END); + wait_event_lock_irq(log->iounit_wait, !list_empty(&log->stripe_end_ios), + log->io_list_lock); } static void r5l_write_super(struct r5l_log *log, sector_t cp); @@ -670,12 +670,7 @@ static void r5l_do_reclaim(struct r5l_log *log) else if (!list_empty(&log->running_ios)) target_list = 
&log->running_ios; - io = list_first_entry(target_list, - struct r5l_io_unit, log_sibling); - spin_unlock_irq(&log->io_list_lock); - /* nobody else can delete the io, we are safe */ - r5l_kick_io_unit(log, io); - spin_lock_irq(&log->io_list_lock); + r5l_kick_io_unit(log); } spin_unlock_irq(&log->io_list_lock); @@ -1079,6 +1074,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) log->rdev->mddev, "reclaim"); if (!log->reclaim_thread) goto reclaim_thread; + init_waitqueue_head(&log->iounit_wait); INIT_LIST_HEAD(&log->no_space_stripes); spin_lock_init(&log->no_space_stripes_lock); -- cgit v1.2.3 From 509ffec7089d10521ac91d4537b789d76103b4c0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 2 Sep 2015 13:49:48 -0700 Subject: raid5-cache: move functionality out of __r5l_set_io_unit_state Just keep __r5l_set_io_unit_state as a small set the state wrapper, and remove r5l_set_io_unit_state entirely after moving the real functionality to the two callers that need it. Signed-off-by: Christoph Hellwig Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 79 +++++++++++++++++++++++------------------------- 1 file changed, 38 insertions(+), 41 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 496f8e2578cc..6479f15a5434 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -215,46 +215,12 @@ static void r5l_compress_stripe_end_list(struct r5l_log *log) list_add_tail(&last->log_sibling, &log->stripe_end_ios); } -static void r5l_wake_reclaim(struct r5l_log *log, sector_t space); static void __r5l_set_io_unit_state(struct r5l_io_unit *io, enum r5l_io_unit_state state) { - struct r5l_log *log = io->log; - if (WARN_ON(io->state >= state)) return; io->state = state; - if (state == IO_UNIT_IO_END) - r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios, - IO_UNIT_IO_END); - if (state == IO_UNIT_STRIPE_END) { - struct r5l_io_unit *last; - sector_t reclaimable_space; - - r5l_move_io_unit_list(&log->flushed_ios, &log->stripe_end_ios, - IO_UNIT_STRIPE_END); - - last = list_last_entry(&log->stripe_end_ios, - struct r5l_io_unit, log_sibling); - reclaimable_space = r5l_ring_distance(log, log->last_checkpoint, - last->log_end); - if (reclaimable_space >= log->max_free_space) - r5l_wake_reclaim(log, 0); - - r5l_compress_stripe_end_list(log); - wake_up(&log->iounit_wait); - } -} - -static void r5l_set_io_unit_state(struct r5l_io_unit *io, - enum r5l_io_unit_state state) -{ - struct r5l_log *log = io->log; - unsigned long flags; - - spin_lock_irqsave(&log->io_list_lock, flags); - __r5l_set_io_unit_state(io, state); - spin_unlock_irqrestore(&log->io_list_lock, flags); } /* XXX: totally ignores I/O errors */ @@ -262,13 +228,19 @@ static void r5l_log_endio(struct bio *bio) { struct r5l_io_unit *io = bio->bi_private; struct r5l_log *log = io->log; + unsigned long flags; bio_put(bio); if (!atomic_dec_and_test(&io->pending_io)) return; - r5l_set_io_unit_state(io, IO_UNIT_IO_END); + spin_lock_irqsave(&log->io_list_lock, flags); + __r5l_set_io_unit_state(io, IO_UNIT_IO_END); + r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios, + IO_UNIT_IO_END); + spin_unlock_irqrestore(&log->io_list_lock, flags); + md_wakeup_thread(log->rdev->mddev->thread); } @@ -277,6 +249,7 @@ static void r5l_submit_current_io(struct r5l_log *log) struct r5l_io_unit *io = log->current_io; struct r5l_meta_block *block; struct bio *bio; + unsigned long flags; u32 crc; if (!io) @@ -288,7 +261,9 @@ static void 
r5l_submit_current_io(struct r5l_log *log) block->checksum = cpu_to_le32(crc); log->current_io = NULL; - r5l_set_io_unit_state(io, IO_UNIT_IO_START); + spin_lock_irqsave(&log->io_list_lock, flags); + __r5l_set_io_unit_state(io, IO_UNIT_IO_START); + spin_unlock_irqrestore(&log->io_list_lock, flags); while ((bio = bio_list_pop(&io->bios))) { /* all IO must start from rdev->data_offset */ @@ -454,6 +429,7 @@ static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, sh->log_io = io; } +static void r5l_wake_reclaim(struct r5l_log *log, sector_t space); /* * running in raid5d, where reclaim could wait for raid5d too (when it flushes * data from log to raid disks), so we shouldn't wait for reclaim here @@ -547,18 +523,39 @@ static void r5l_run_no_space_stripes(struct r5l_log *log) spin_unlock(&log->no_space_stripes_lock); } +static void __r5l_stripe_write_finished(struct r5l_io_unit *io) +{ + struct r5l_log *log = io->log; + struct r5l_io_unit *last; + sector_t reclaimable_space; + unsigned long flags; + + spin_lock_irqsave(&log->io_list_lock, flags); + __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END); + r5l_move_io_unit_list(&log->flushed_ios, &log->stripe_end_ios, + IO_UNIT_STRIPE_END); + + last = list_last_entry(&log->stripe_end_ios, + struct r5l_io_unit, log_sibling); + reclaimable_space = r5l_ring_distance(log, log->last_checkpoint, + last->log_end); + if (reclaimable_space >= log->max_free_space) + r5l_wake_reclaim(log, 0); + + r5l_compress_stripe_end_list(log); + spin_unlock_irqrestore(&log->io_list_lock, flags); + wake_up(&log->iounit_wait); +} + void r5l_stripe_write_finished(struct stripe_head *sh) { struct r5l_io_unit *io; - /* Don't support stripe batch */ io = sh->log_io; - if (!io) - return; sh->log_io = NULL; - if (atomic_dec_and_test(&io->pending_stripe)) - r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END); + if (io && atomic_dec_and_test(&io->pending_stripe)) + __r5l_stripe_write_finished(io); } static void r5l_log_flush_endio(struct bio *bio) -- cgit v1.2.3 From 828cbe989e4f5c8666cb3d99918b03666ccde0a0 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 2 Sep 2015 13:49:49 -0700 Subject: raid5-cache: optimize FLUSH IO with log enabled With log enabled, bio is written to raid disks after the bio is settled down in log disk. The recovery guarantees we can recovery the bio data from log disk, so we we skip FLUSH IO. Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 18 ++++++++++++++++++ drivers/md/raid5.c | 11 +++++++++-- drivers/md/raid5.h | 1 + 3 files changed, 28 insertions(+), 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 6479f15a5434..ea1480392eba 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -507,6 +507,24 @@ void r5l_write_stripe_run(struct r5l_log *log) mutex_unlock(&log->io_mutex); } +int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio) +{ + if (!log) + return -ENODEV; + /* + * we flush log disk cache first, then write stripe data to raid disks. + * So if bio is finished, the log disk cache is flushed already. 
The + * recovery guarantees we can recovery the bio from log disk, so we + * don't need to flush again + */ + if (bio->bi_iter.bi_size == 0) { + bio_endio(bio); + return 0; + } + bio->bi_rw &= ~REQ_FLUSH; + return -EAGAIN; +} + /* This will run after log space is reclaimed */ static void r5l_run_no_space_stripes(struct r5l_log *log) { diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 46042c7c25a5..a622ccb3477a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5146,8 +5146,15 @@ static void make_request(struct mddev *mddev, struct bio * bi) bool do_prepare; if (unlikely(bi->bi_rw & REQ_FLUSH)) { - md_flush_request(mddev, bi); - return; + int ret = r5l_handle_flush_request(conf->log, bi); + + if (ret == 0) + return; + if (ret == -ENODEV) { + md_flush_request(mddev, bi); + return; + } + /* ret == -EAGAIN, fallback */ } md_write_start(mddev, bi); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 1f16d437bfda..32c8ce81248b 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -629,4 +629,5 @@ extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh); extern void r5l_write_stripe_run(struct r5l_log *log); extern void r5l_flush_stripe_to_raid(struct r5l_log *log); extern void r5l_stripe_write_finished(struct stripe_head *sh); +extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio); #endif -- cgit v1.2.3 From bd18f6462f3d167a9b3ec27851c98f82694b2adf Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 2 Sep 2015 13:49:50 -0700 Subject: md: skip resync for raid array with journal If a raid array has journal, the journal can guarantee the consistency, we can skip resync after a unclean shutdown. The exception is raid creation or user initiated resync, which we still do a raid resync. Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/md.c | 4 ++++ drivers/md/md.h | 1 + 2 files changed, 5 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 3592beb6931e..89149acd8a5e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1647,6 +1647,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) } set_bit(Journal, &rdev->flags); rdev->journal_tail = le64_to_cpu(sb->journal_tail); + if (mddev->recovery_cp == MaxSector) + set_bit(MD_JOURNAL_CLEAN, &mddev->flags); break; default: rdev->saved_raid_disk = role; @@ -1689,6 +1691,8 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) sb->events = cpu_to_le64(mddev->events); if (mddev->in_sync) sb->resync_offset = cpu_to_le64(mddev->recovery_cp); + else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) + sb->resync_offset = cpu_to_le64(MaxSector); else sb->resync_offset = cpu_to_le64(0); diff --git a/drivers/md/md.h b/drivers/md/md.h index 2b0f62fb6146..e14e667a20e9 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -232,6 +232,7 @@ struct mddev { #define MD_STILL_CLOSED 4 /* If set, then array has not been opened since * md_ioctl checked on it. */ +#define MD_JOURNAL_CLEAN 5 /* A raid with journal is already clean */ int suspended; atomic_t active_io; -- cgit v1.2.3 From 85f2f9a4f49d3e3230b3c5fb08362d561691421e Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Fri, 4 Sep 2015 14:14:05 -0700 Subject: raid5-cache: check stripe finish out of order stripes could finish out of order. Hence r5l_move_io_unit_list() of __r5l_stripe_write_finished might not move any entry and leave stripe_end_ios list empty. 
This applies on top of http://marc.info/?l=linux-raid&m=144122700510667 Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index ea1480392eba..30c7e5e79a02 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -550,8 +550,13 @@ static void __r5l_stripe_write_finished(struct r5l_io_unit *io) spin_lock_irqsave(&log->io_list_lock, flags); __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END); + /* might move 0 entry */ r5l_move_io_unit_list(&log->flushed_ios, &log->stripe_end_ios, IO_UNIT_STRIPE_END); + if (list_empty(&log->stripe_end_ios)) { + spin_unlock_irqrestore(&log->io_list_lock, flags); + return; + } last = list_last_entry(&log->stripe_end_ios, struct r5l_io_unit, log_sibling); -- cgit v1.2.3 From 253f9fd41afe2492b85de779946b5882a00dcdc5 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Fri, 4 Sep 2015 14:14:16 -0700 Subject: raid5-cache: don't delay stripe captured in log There is a case where a stripe can get delayed forever: 1. a stripe finishes construction 2. a new bio hits the stripe 3. handle_stripe runs for the stripe. The stripe gets the DELAYED bit set, since construction can't run for the new bio (the stripe has been locked since step 1). Without a log, handle_stripe will call ops_run_io. After the IO finishes, the stripe gets unlocked and will restart and run construction for the new bio. With a log, ops_run_io needs to run twice. If the DELAYED bit is set, the stripe can't enter the handle_list, so the second ops_run_io doesn't run, which leaves the stripe stalled. Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 30c7e5e79a02..0460882a5fd7 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -479,6 +479,11 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) return -EINVAL; set_bit(STRIPE_LOG_TRAPPED, &sh->state); + /* + * The stripe must enter state machine again to finish the write, so + * don't delay. + */ + clear_bit(STRIPE_DELAYED, &sh->state); atomic_inc(&sh->count); mutex_lock(&log->io_mutex); -- cgit v1.2.3 From 0b020e85bdd5765aac2440848e7a927069f5f83c Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 3 Sep 2015 23:00:35 -0700 Subject: skip match_mddev_units check for special roles match_mddev_units is used to check whether two RAID arrays share the same disk(s). Arrays that share disk(s) will not resync at the same time, for better performance (fewer HDD seeks). However, this check should not apply to Spare, Faulty, and Journal disks, as they do not participate in resync. In this patch, match_mddev_units skips the check for disks with the flag "Faulty" or "Journal" or with raid_disk < 0.
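For illustration only (not part of the patch, and the helper name is hypothetical), the rule being added boils down to a predicate like:

	static bool rdev_counts_for_resync(struct md_rdev *rdev)
	{
		/* Spare (raid_disk < 0), Faulty and Journal devices do not
		 * participate in resync, so they are ignored when checking
		 * whether two arrays share a disk.
		 */
		return !test_bit(Faulty, &rdev->flags) &&
		       !test_bit(Journal, &rdev->flags) &&
		       rdev->raid_disk >= 0;
	}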
Signed-off-by: Song Liu Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/md.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 89149acd8a5e..fe67272d0b1b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1935,13 +1935,23 @@ static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) struct md_rdev *rdev, *rdev2; rcu_read_lock(); - rdev_for_each_rcu(rdev, mddev1) - rdev_for_each_rcu(rdev2, mddev2) + rdev_for_each_rcu(rdev, mddev1) { + if (test_bit(Faulty, &rdev->flags) || + test_bit(Journal, &rdev->flags) || + rdev->raid_disk == -1) + continue; + rdev_for_each_rcu(rdev2, mddev2) { + if (test_bit(Faulty, &rdev2->flags) || + test_bit(Journal, &rdev2->flags) || + rdev2->raid_disk == -1) + continue; if (rdev->bdev->bd_contains == rdev2->bdev->bd_contains) { rcu_read_unlock(); return 1; } + } + } rcu_read_unlock(); return 0; } -- cgit v1.2.3 From ac6096e9d5cb88a31f3af2d140df7d680b42745e Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Sun, 4 Oct 2015 09:20:11 -0700 Subject: md: show journal for journal disk in disk state sysfs Journal disk state sysfs entry should indicate it's journal Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/md.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index fe67272d0b1b..5744829b7d05 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2520,6 +2520,10 @@ state_show(struct md_rdev *rdev, char *page) len += sprintf(page+len, "%sin_sync",sep); sep = ","; } + if (test_bit(Journal, &flags)) { + len += sprintf(page+len, "%sjournal",sep); + sep = ","; + } if (test_bit(WriteMostly, &flags)) { len += sprintf(page+len, "%swrite_mostly",sep); sep = ","; -- cgit v1.2.3 From e6c033f79a0a1e9ca850575dcfa51bb583b592fa Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Sun, 4 Oct 2015 09:20:12 -0700 Subject: raid5-cache: move reclaim stop to quiesce Move reclaim stop to quiesce handling, where is safer for this stuff. Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 30 ++++++++++++++++++------------ drivers/md/raid5.c | 1 + drivers/md/raid5.h | 1 + 3 files changed, 20 insertions(+), 12 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 0460882a5fd7..289ca3f5d4b3 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -748,6 +748,24 @@ static void r5l_wake_reclaim(struct r5l_log *log, sector_t space) md_wakeup_thread(log->reclaim_thread); } +void r5l_quiesce(struct r5l_log *log, int state) +{ + if (!log || state == 2) + return; + if (state == 0) { + log->reclaim_thread = md_register_thread(r5l_reclaim_thread, + log->rdev->mddev, "reclaim"); + } else if (state == 1) { + /* + * at this point all stripes are finished, so io_unit is at + * least in STRIPE_END state + */ + r5l_wake_reclaim(log, -1L); + md_unregister_thread(&log->reclaim_thread); + r5l_do_reclaim(log); + } +} + struct r5l_recovery_ctx { struct page *meta_page; /* current meta */ sector_t meta_total_blocks; /* total size of current meta and data */ @@ -1120,19 +1138,7 @@ io_kc: void r5l_exit_log(struct r5l_log *log) { - /* - * at this point all stripes are finished, so io_unit is at least in - * STRIPE_END state - */ - r5l_wake_reclaim(log, -1L); md_unregister_thread(&log->reclaim_thread); - r5l_do_reclaim(log); - /* - * force a super update, r5l_do_reclaim might updated the super. 
- * mddev->thread is already stopped - */ - md_update_sb(log->rdev->mddev, 1); - kmem_cache_destroy(log->io_kc); kfree(log); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a622ccb3477a..216fa3c64287 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7582,6 +7582,7 @@ static void raid5_quiesce(struct mddev *mddev, int state) unlock_all_device_hash_locks_irq(conf); break; } + r5l_quiesce(conf->log, state); } static void *raid45_takeover_raid0(struct mddev *mddev, int level) diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 32c8ce81248b..1ab534c909fe 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -630,4 +630,5 @@ extern void r5l_write_stripe_run(struct r5l_log *log); extern void r5l_flush_stripe_to_raid(struct r5l_log *log); extern void r5l_stripe_write_finished(struct stripe_head *sh); extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio); +extern void r5l_quiesce(struct r5l_log *log, int state); #endif -- cgit v1.2.3 From 170364619ac21c2b14869571eeaf767ae825f96c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Oct 2015 09:31:06 +0200 Subject: raid5-cache: free I/O units earlier There is no good reason to keep the I/O unit structures around after the stripe has been written back to the RAID array. The only information we need is the log sequence number, and the checkpoint offset of the highest successfull writeback. Store those in the log structure, and free the IO units from __r5l_stripe_write_finished. Besides simplifying the code this also avoid having to keep the allocation for the I/O unit around for a potentially long time as superblock updates that checkpoint the log do not happen very often. This also fixes the previously incorrect calculation of 'free' in r5l_do_reclaim as a side effect: previous if took the last unit which isn't checkpointed into account. Signed-off-by: Christoph Hellwig Reviewed-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 143 ++++++++++++++++++----------------------------- 1 file changed, 54 insertions(+), 89 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 289ca3f5d4b3..604c64505232 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -51,6 +51,9 @@ struct r5l_log { sector_t log_start; /* log head. where new data appends */ u64 seq; /* log head sequence */ + sector_t next_checkpoint; + u64 next_cp_seq; + struct mutex io_mutex; struct r5l_io_unit *current_io; /* current io_unit accepting new data */ @@ -65,9 +68,6 @@ struct r5l_log { * cache flush */ struct list_head flushed_ios; /* io_units which settle down in log disk */ struct bio flush_bio; - struct list_head stripe_end_ios;/* io_units which have been completely - * written to the RAID but have not yet - * been considered for updating super */ struct kmem_cache *io_kc; @@ -186,35 +186,6 @@ static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to, } } -/* - * We don't want too many io_units reside in stripe_end_ios list, which will - * waste a lot of memory. So we try to remove some. But we must keep at least 2 - * io_units. 
The superblock must point to a valid meta, if it's the last meta, - * recovery can scan less - */ -static void r5l_compress_stripe_end_list(struct r5l_log *log) -{ - struct r5l_io_unit *first, *last, *io; - - first = list_first_entry(&log->stripe_end_ios, - struct r5l_io_unit, log_sibling); - last = list_last_entry(&log->stripe_end_ios, - struct r5l_io_unit, log_sibling); - if (first == last) - return; - list_del(&first->log_sibling); - list_del(&last->log_sibling); - while (!list_empty(&log->stripe_end_ios)) { - io = list_first_entry(&log->stripe_end_ios, - struct r5l_io_unit, log_sibling); - list_del(&io->log_sibling); - first->log_end = io->log_end; - r5l_free_io_unit(log, io); - } - list_add_tail(&first->log_sibling, &log->stripe_end_ios); - list_add_tail(&last->log_sibling, &log->stripe_end_ios); -} - static void __r5l_set_io_unit_state(struct r5l_io_unit *io, enum r5l_io_unit_state state) { @@ -546,31 +517,52 @@ static void r5l_run_no_space_stripes(struct r5l_log *log) spin_unlock(&log->no_space_stripes_lock); } +static sector_t r5l_reclaimable_space(struct r5l_log *log) +{ + return r5l_ring_distance(log, log->last_checkpoint, + log->next_checkpoint); +} + +static bool r5l_complete_flushed_ios(struct r5l_log *log) +{ + struct r5l_io_unit *io, *next; + bool found = false; + + assert_spin_locked(&log->io_list_lock); + + list_for_each_entry_safe(io, next, &log->flushed_ios, log_sibling) { + /* don't change list order */ + if (io->state < IO_UNIT_STRIPE_END) + break; + + log->next_checkpoint = io->log_start; + log->next_cp_seq = io->seq; + + list_del(&io->log_sibling); + r5l_free_io_unit(log, io); + + found = true; + } + + return found; +} + static void __r5l_stripe_write_finished(struct r5l_io_unit *io) { struct r5l_log *log = io->log; - struct r5l_io_unit *last; - sector_t reclaimable_space; unsigned long flags; spin_lock_irqsave(&log->io_list_lock, flags); __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END); - /* might move 0 entry */ - r5l_move_io_unit_list(&log->flushed_ios, &log->stripe_end_ios, - IO_UNIT_STRIPE_END); - if (list_empty(&log->stripe_end_ios)) { + + if (!r5l_complete_flushed_ios(log)) { spin_unlock_irqrestore(&log->io_list_lock, flags); return; } - last = list_last_entry(&log->stripe_end_ios, - struct r5l_io_unit, log_sibling); - reclaimable_space = r5l_ring_distance(log, log->last_checkpoint, - last->log_end); - if (reclaimable_space >= log->max_free_space) + if (r5l_reclaimable_space(log) > log->max_free_space) r5l_wake_reclaim(log, 0); - r5l_compress_stripe_end_list(log); spin_unlock_irqrestore(&log->io_list_lock, flags); wake_up(&log->iounit_wait); } @@ -646,20 +638,13 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log) submit_bio(WRITE_FLUSH, &log->flush_bio); } -static void r5l_kick_io_unit(struct r5l_log *log) -{ - md_wakeup_thread(log->rdev->mddev->thread); - wait_event_lock_irq(log->iounit_wait, !list_empty(&log->stripe_end_ios), - log->io_list_lock); -} - static void r5l_write_super(struct r5l_log *log, sector_t cp); static void r5l_do_reclaim(struct r5l_log *log) { - struct r5l_io_unit *io, *last; - LIST_HEAD(list); - sector_t free = 0; sector_t reclaim_target = xchg(&log->reclaim_target, 0); + sector_t reclaimable; + sector_t next_checkpoint; + u64 next_cp_seq; spin_lock_irq(&log->io_list_lock); /* @@ -668,60 +653,41 @@ static void r5l_do_reclaim(struct r5l_log *log) * shouldn't reuse space of an unreclaimable io_unit */ while (1) { - struct list_head *target_list = NULL; - - while (!list_empty(&log->stripe_end_ios)) { - io = 
list_first_entry(&log->stripe_end_ios, - struct r5l_io_unit, log_sibling); - list_move_tail(&io->log_sibling, &list); - free += r5l_ring_distance(log, io->log_start, - io->log_end); - } - - if (free >= reclaim_target || + reclaimable = r5l_reclaimable_space(log); + if (reclaimable >= reclaim_target || (list_empty(&log->running_ios) && list_empty(&log->io_end_ios) && list_empty(&log->flushing_ios) && list_empty(&log->flushed_ios))) break; - /* Below waiting mostly happens when we shutdown the raid */ - if (!list_empty(&log->flushed_ios)) - target_list = &log->flushed_ios; - else if (!list_empty(&log->flushing_ios)) - target_list = &log->flushing_ios; - else if (!list_empty(&log->io_end_ios)) - target_list = &log->io_end_ios; - else if (!list_empty(&log->running_ios)) - target_list = &log->running_ios; - - r5l_kick_io_unit(log); + md_wakeup_thread(log->rdev->mddev->thread); + wait_event_lock_irq(log->iounit_wait, + r5l_reclaimable_space(log) > reclaimable, + log->io_list_lock); } + + next_checkpoint = log->next_checkpoint; + next_cp_seq = log->next_cp_seq; spin_unlock_irq(&log->io_list_lock); - if (list_empty(&list)) + BUG_ON(reclaimable < 0); + if (reclaimable == 0) return; - /* super always point to last valid meta */ - last = list_last_entry(&list, struct r5l_io_unit, log_sibling); /* * write_super will flush cache of each raid disk. We must write super * here, because the log area might be reused soon and we don't want to * confuse recovery */ - r5l_write_super(log, last->log_start); + r5l_write_super(log, next_checkpoint); mutex_lock(&log->io_mutex); - log->last_checkpoint = last->log_start; - log->last_cp_seq = last->seq; + log->last_checkpoint = next_checkpoint; + log->last_cp_seq = next_cp_seq; mutex_unlock(&log->io_mutex); - r5l_run_no_space_stripes(log); - while (!list_empty(&list)) { - io = list_first_entry(&list, struct r5l_io_unit, log_sibling); - list_del(&io->log_sibling); - r5l_free_io_unit(log, io); - } + r5l_run_no_space_stripes(log); } static void r5l_reclaim_thread(struct md_thread *thread) @@ -1104,7 +1070,6 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) spin_lock_init(&log->io_list_lock); INIT_LIST_HEAD(&log->running_ios); INIT_LIST_HEAD(&log->io_end_ios); - INIT_LIST_HEAD(&log->stripe_end_ios); INIT_LIST_HEAD(&log->flushing_ios); INIT_LIST_HEAD(&log->flushed_ios); bio_init(&log->flush_bio); -- cgit v1.2.3 From 04732f741dce5e21b3ca90677a237635f1e98184 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Oct 2015 09:31:07 +0200 Subject: raid5-cache: rename flushed_ios to finished_ios After this series we won't nessecarily have flushed the cache for these I/Os, so give the list a more neutral name. 
Signed-off-by: Christoph Hellwig Reviewed-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 604c64505232..c7d5a1ee4b75 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -66,7 +66,7 @@ struct r5l_log { * to the RAID */ struct list_head flushing_ios; /* io_units which are waiting for log * cache flush */ - struct list_head flushed_ios; /* io_units which settle down in log disk */ + struct list_head finished_ios; /* io_units which settle down in log disk */ struct bio flush_bio; struct kmem_cache *io_kc; @@ -523,14 +523,14 @@ static sector_t r5l_reclaimable_space(struct r5l_log *log) log->next_checkpoint); } -static bool r5l_complete_flushed_ios(struct r5l_log *log) +static bool r5l_complete_finished_ios(struct r5l_log *log) { struct r5l_io_unit *io, *next; bool found = false; assert_spin_locked(&log->io_list_lock); - list_for_each_entry_safe(io, next, &log->flushed_ios, log_sibling) { + list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) { /* don't change list order */ if (io->state < IO_UNIT_STRIPE_END) break; @@ -555,7 +555,7 @@ static void __r5l_stripe_write_finished(struct r5l_io_unit *io) spin_lock_irqsave(&log->io_list_lock, flags); __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END); - if (!r5l_complete_flushed_ios(log)) { + if (!r5l_complete_finished_ios(log)) { spin_unlock_irqrestore(&log->io_list_lock, flags); return; } @@ -596,7 +596,7 @@ static void r5l_log_flush_endio(struct bio *bio) raid5_release_stripe(sh); } } - list_splice_tail_init(&log->flushing_ios, &log->flushed_ios); + list_splice_tail_init(&log->flushing_ios, &log->finished_ios); spin_unlock_irqrestore(&log->io_list_lock, flags); } @@ -658,7 +658,7 @@ static void r5l_do_reclaim(struct r5l_log *log) (list_empty(&log->running_ios) && list_empty(&log->io_end_ios) && list_empty(&log->flushing_ios) && - list_empty(&log->flushed_ios))) + list_empty(&log->finished_ios))) break; md_wakeup_thread(log->rdev->mddev->thread); @@ -1071,7 +1071,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) INIT_LIST_HEAD(&log->running_ios); INIT_LIST_HEAD(&log->io_end_ios); INIT_LIST_HEAD(&log->flushing_ios); - INIT_LIST_HEAD(&log->flushed_ios); + INIT_LIST_HEAD(&log->finished_ios); bio_init(&log->flush_bio); log->io_kc = KMEM_CACHE(r5l_io_unit, 0); -- cgit v1.2.3 From d8858f4321b744ff02e286165b643e8dc0ef1cf5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Oct 2015 09:31:08 +0200 Subject: raid5-cache: factor out a helper to run all stripes for an I/O unit Signed-off-by: Christoph Hellwig Reviewed-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index c7d5a1ee4b75..a81db0a8466a 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -194,6 +194,17 @@ static void __r5l_set_io_unit_state(struct r5l_io_unit *io, io->state = state; } +static void r5l_io_run_stripes(struct r5l_io_unit *io) +{ + struct stripe_head *sh, *next; + + list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { + list_del_init(&sh->log_list); + set_bit(STRIPE_HANDLE, &sh->state); + raid5_release_stripe(sh); + } +} + /* XXX: totally ignores I/O errors */ static void r5l_log_endio(struct bio *bio) { @@ -584,18 +595,10 @@ 
static void r5l_log_flush_endio(struct bio *bio) flush_bio); unsigned long flags; struct r5l_io_unit *io; - struct stripe_head *sh; spin_lock_irqsave(&log->io_list_lock, flags); - list_for_each_entry(io, &log->flushing_ios, log_sibling) { - while (!list_empty(&io->stripe_list)) { - sh = list_first_entry(&io->stripe_list, - struct stripe_head, log_list); - list_del_init(&sh->log_list); - set_bit(STRIPE_HANDLE, &sh->state); - raid5_release_stripe(sh); - } - } + list_for_each_entry(io, &log->flushing_ios, log_sibling) + r5l_io_run_stripes(io); list_splice_tail_init(&log->flushing_ios, &log->finished_ios); spin_unlock_irqrestore(&log->io_list_lock, flags); } -- cgit v1.2.3 From 56fef7c6e02493dce316de99a11e9e26b852218b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Oct 2015 09:31:09 +0200 Subject: raid5-cache: simplify state machine when caches flushes are not needed For devices without a volatile write cache we don't need to send a FLUSH command to ensure writes are stable on disk, and thus can avoid the whole step of batching up bios for processing by the MD thread. Signed-off-by: Christoph Hellwig Reviewed-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 33 +++++++++++++++++++++++++++++---- 1 file changed, 29 insertions(+), 4 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index a81db0a8466a..a7ee7ec89f4f 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -83,6 +83,8 @@ struct r5l_log { struct list_head no_space_stripes; /* pending stripes, log has no space */ spinlock_t no_space_stripes_lock; + + bool need_cache_flush; }; /* @@ -206,6 +208,22 @@ static void r5l_io_run_stripes(struct r5l_io_unit *io) } /* XXX: totally ignores I/O errors */ +static void r5l_log_run_stripes(struct r5l_log *log) +{ + struct r5l_io_unit *io, *next; + + assert_spin_locked(&log->io_list_lock); + + list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) { + /* don't change list order */ + if (io->state < IO_UNIT_IO_END) + break; + + list_move_tail(&io->log_sibling, &log->finished_ios); + r5l_io_run_stripes(io); + } +} + static void r5l_log_endio(struct bio *bio) { struct r5l_io_unit *io = bio->bi_private; @@ -219,11 +237,15 @@ static void r5l_log_endio(struct bio *bio) spin_lock_irqsave(&log->io_list_lock, flags); __r5l_set_io_unit_state(io, IO_UNIT_IO_END); - r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios, - IO_UNIT_IO_END); + if (log->need_cache_flush) + r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios, + IO_UNIT_IO_END); + else + r5l_log_run_stripes(log); spin_unlock_irqrestore(&log->io_list_lock, flags); - md_wakeup_thread(log->rdev->mddev->thread); + if (log->need_cache_flush) + md_wakeup_thread(log->rdev->mddev->thread); } static void r5l_submit_current_io(struct r5l_log *log) @@ -620,7 +642,8 @@ static void r5l_log_flush_endio(struct bio *bio) void r5l_flush_stripe_to_raid(struct r5l_log *log) { bool do_flush; - if (!log) + + if (!log || !log->need_cache_flush) return; spin_lock_irq(&log->io_list_lock); @@ -1065,6 +1088,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) return -ENOMEM; log->rdev = rdev; + log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0); + log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid, sizeof(rdev->mddev->uuid)); -- cgit v1.2.3 From 22581f58ed3f0c9eb066d67b696b66f951df5c2b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Oct 2015 09:31:10 +0200 Subject: raid5-cache: clean up r5l_get_meta Remove 
the only partially used local 'io' variable to simplify the code flow. Signed-off-by: Christoph Hellwig Reviewed-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index a7ee7ec89f4f..29db786557d9 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -322,16 +322,12 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size) { - struct r5l_io_unit *io; - - io = log->current_io; - if (io && io->meta_offset + payload_size > PAGE_SIZE) + if (log->current_io && + log->current_io->meta_offset + payload_size > PAGE_SIZE) r5l_submit_current_io(log); - io = log->current_io; - if (io) - return 0; - log->current_io = r5l_new_meta(log); + if (!log->current_io) + log->current_io = r5l_new_meta(log); return 0; } -- cgit v1.2.3 From b349feb36cc2b7ed41a5222a0b681153f3525369 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Oct 2015 09:31:11 +0200 Subject: raid5-cache: refactor bio allocation Split out a helper to allocate a bio for log writes. Signed-off-by: Christoph Hellwig Reviewed-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 45 ++++++++++++++++++++------------------------- 1 file changed, 20 insertions(+), 25 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 29db786557d9..59a4640f09e5 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -276,11 +276,25 @@ static void r5l_submit_current_io(struct r5l_log *log) } } +static struct bio *r5l_bio_alloc(struct r5l_log *log, struct r5l_io_unit *io) +{ + struct bio *bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES); + + bio->bi_rw = WRITE; + bio->bi_bdev = log->rdev->bdev; + bio->bi_iter.bi_sector = log->log_start; + bio->bi_end_io = r5l_log_endio; + bio->bi_private = io; + + bio_list_add(&io->bios, bio); + atomic_inc(&io->pending_io); + return bio; +} + static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) { struct r5l_io_unit *io; struct r5l_meta_block *block; - struct bio *bio; io = r5l_alloc_io_unit(log); @@ -294,17 +308,8 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) io->meta_offset = sizeof(struct r5l_meta_block); io->seq = log->seq; - bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES); - io->current_bio = bio; - bio->bi_rw = WRITE; - bio->bi_bdev = log->rdev->bdev; - bio->bi_iter.bi_sector = log->log_start; - bio_add_page(bio, io->meta_page, PAGE_SIZE, 0); - bio->bi_end_io = r5l_log_endio; - bio->bi_private = io; - - bio_list_add(&io->bios, bio); - atomic_inc(&io->pending_io); + io->current_bio = r5l_bio_alloc(log, io); + bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0); log->seq++; log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS); @@ -358,19 +363,9 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page) struct r5l_io_unit *io = log->current_io; alloc_bio: - if (!io->current_bio) { - struct bio *bio; - - bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES); - bio->bi_rw = WRITE; - bio->bi_bdev = log->rdev->bdev; - bio->bi_iter.bi_sector = log->log_start; - bio->bi_end_io = r5l_log_endio; - bio->bi_private = io; - bio_list_add(&io->bios, bio); - atomic_inc(&io->pending_io); - io->current_bio = bio; - } + if (!io->current_bio) + io->current_bio = r5l_bio_alloc(log, io); + if 
(!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) { io->current_bio = NULL; goto alloc_bio; -- cgit v1.2.3 From 1e932a37ccb9ad1984823dead4b48902fc5789f4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Oct 2015 09:31:12 +0200 Subject: raid5-cache: take rdev->data_offset into account early on Set up bi_sector properly when we allocate an bio instead of updating it at submission time. Signed-off-by: Christoph Hellwig Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 59a4640f09e5..2ac93fbc31eb 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -269,11 +269,8 @@ static void r5l_submit_current_io(struct r5l_log *log) __r5l_set_io_unit_state(io, IO_UNIT_IO_START); spin_unlock_irqrestore(&log->io_list_lock, flags); - while ((bio = bio_list_pop(&io->bios))) { - /* all IO must start from rdev->data_offset */ - bio->bi_iter.bi_sector += log->rdev->data_offset; + while ((bio = bio_list_pop(&io->bios))) submit_bio(WRITE, bio); - } } static struct bio *r5l_bio_alloc(struct r5l_log *log, struct r5l_io_unit *io) @@ -282,7 +279,7 @@ static struct bio *r5l_bio_alloc(struct r5l_log *log, struct r5l_io_unit *io) bio->bi_rw = WRITE; bio->bi_bdev = log->rdev->bdev; - bio->bi_iter.bi_sector = log->log_start; + bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; bio->bi_end_io = r5l_log_endio; bio->bi_private = io; -- cgit v1.2.3 From 51039cd066553689bb82a588b25a6eba7d453837 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Oct 2015 09:31:13 +0200 Subject: raid5-cache: inline r5l_alloc_io_unit into r5l_new_meta This is the only user, and keeping all code initializing the io_unit structure together improves readbility. 
Signed-off-by: Christoph Hellwig Reviewed-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 2ac93fbc31eb..88758285261c 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -150,23 +150,6 @@ static bool r5l_has_free_space(struct r5l_log *log, sector_t size) return log->device_size > used_size + size; } -static struct r5l_io_unit *r5l_alloc_io_unit(struct r5l_log *log) -{ - struct r5l_io_unit *io; - /* We can't handle memory allocate failure so far */ - gfp_t gfp = GFP_NOIO | __GFP_NOFAIL; - - io = kmem_cache_zalloc(log->io_kc, gfp); - io->log = log; - io->meta_page = alloc_page(gfp | __GFP_ZERO); - - bio_list_init(&io->bios); - INIT_LIST_HEAD(&io->log_sibling); - INIT_LIST_HEAD(&io->stripe_list); - io->state = IO_UNIT_RUNNING; - return io; -} - static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io) { __free_page(io->meta_page); @@ -293,8 +276,15 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) struct r5l_io_unit *io; struct r5l_meta_block *block; - io = r5l_alloc_io_unit(log); + /* We can't handle memory allocate failure so far */ + io = kmem_cache_zalloc(log->io_kc, GFP_NOIO | __GFP_NOFAIL); + io->log = log; + bio_list_init(&io->bios); + INIT_LIST_HEAD(&io->log_sibling); + INIT_LIST_HEAD(&io->stripe_list); + io->state = IO_UNIT_RUNNING; + io->meta_page = alloc_page(GFP_NOIO | __GFP_NOFAIL | __GFP_ZERO); block = page_address(io->meta_page); block->magic = cpu_to_le32(R5LOG_MAGIC); block->version = R5LOG_VERSION; -- cgit v1.2.3 From c1b9919849866b96dc435f025beaa307dc76ca27 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Oct 2015 09:31:14 +0200 Subject: raid5-cache: new helper: r5_reserve_log_entry Factor out code to reserve log space. Signed-off-by: Christoph Hellwig Reviewed-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 88758285261c..ea20d740a67c 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -271,6 +271,23 @@ static struct bio *r5l_bio_alloc(struct r5l_log *log, struct r5l_io_unit *io) return bio; } +static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io) +{ + log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS); + + /* + * If we filled up the log device start from the beginning again, + * which will require a new bio. + * + * Note: for this to work properly the log size needs to me a multiple + * of BLOCK_SECTORS. 
+ */ + if (log->log_start == 0) + io->current_bio = NULL; + + io->log_end = log->log_start; +} + static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) { struct r5l_io_unit *io; @@ -299,11 +316,7 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0); log->seq++; - log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS); - io->log_end = log->log_start; - /* current bio hit disk end */ - if (log->log_start == 0) - io->current_bio = NULL; + r5_reserve_log_entry(log, io); spin_lock_irq(&log->io_list_lock); list_add_tail(&io->log_sibling, &log->running_ios); @@ -357,13 +370,8 @@ alloc_bio: io->current_bio = NULL; goto alloc_bio; } - log->log_start = r5l_ring_add(log, log->log_start, - BLOCK_SECTORS); - /* current bio hit disk end */ - if (log->log_start == 0) - io->current_bio = NULL; - io->log_end = log->log_start; + r5_reserve_log_entry(log, io); } static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, -- cgit v1.2.3 From 2b8ef16ec4c529c7df3bb4e4eba45ac645a97b00 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Oct 2015 09:31:15 +0200 Subject: raid5-cache: small log->seq cleanup Signed-off-by: Christoph Hellwig Reviewed-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index ea20d740a67c..4e6046f1b0aa 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -310,12 +310,11 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) io->log_start = log->log_start; io->meta_offset = sizeof(struct r5l_meta_block); - io->seq = log->seq; + io->seq = log->seq++; io->current_bio = r5l_bio_alloc(log, io); bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0); - log->seq++; r5_reserve_log_entry(log, io); spin_lock_irq(&log->io_list_lock); -- cgit v1.2.3 From 6143e2cecb9ef9c7d3392c9c2583f231705413a5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Oct 2015 09:31:16 +0200 Subject: raid5-cache: use bio chaining Simplify the bio completion handler by using bio chaining and submitting bios as soon as they are full. 
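bio_chain(child, parent) defers the parent's bi_end_io until both the parent and the chained bio have completed, so only the first bio of an io_unit has to carry r5l_log_endio; the per-io_unit bio_list and pending_io counter can therefore be dropped.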
Signed-off-by: Christoph Hellwig Reviewed-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 4e6046f1b0aa..d3b5441b4c11 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -100,8 +100,6 @@ struct r5l_io_unit { struct page *meta_page; /* store meta block */ int meta_offset; /* current offset in meta_page */ - struct bio_list bios; - atomic_t pending_io; /* pending bios not written to log yet */ struct bio *current_bio;/* current_bio accepting new data */ atomic_t pending_stripe;/* how many stripes not flushed to raid */ @@ -112,6 +110,7 @@ struct r5l_io_unit { struct list_head stripe_list; /* stripes added to the io_unit */ int state; + bool need_split_bio; }; /* r5l_io_unit state */ @@ -215,9 +214,6 @@ static void r5l_log_endio(struct bio *bio) bio_put(bio); - if (!atomic_dec_and_test(&io->pending_io)) - return; - spin_lock_irqsave(&log->io_list_lock, flags); __r5l_set_io_unit_state(io, IO_UNIT_IO_END); if (log->need_cache_flush) @@ -235,7 +231,6 @@ static void r5l_submit_current_io(struct r5l_log *log) { struct r5l_io_unit *io = log->current_io; struct r5l_meta_block *block; - struct bio *bio; unsigned long flags; u32 crc; @@ -252,22 +247,17 @@ static void r5l_submit_current_io(struct r5l_log *log) __r5l_set_io_unit_state(io, IO_UNIT_IO_START); spin_unlock_irqrestore(&log->io_list_lock, flags); - while ((bio = bio_list_pop(&io->bios))) - submit_bio(WRITE, bio); + submit_bio(WRITE, io->current_bio); } -static struct bio *r5l_bio_alloc(struct r5l_log *log, struct r5l_io_unit *io) +static struct bio *r5l_bio_alloc(struct r5l_log *log) { struct bio *bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES); bio->bi_rw = WRITE; bio->bi_bdev = log->rdev->bdev; bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; - bio->bi_end_io = r5l_log_endio; - bio->bi_private = io; - bio_list_add(&io->bios, bio); - atomic_inc(&io->pending_io); return bio; } @@ -283,7 +273,7 @@ static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io) * of BLOCK_SECTORS. 
*/ if (log->log_start == 0) - io->current_bio = NULL; + io->need_split_bio = true; io->log_end = log->log_start; } @@ -296,7 +286,6 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) /* We can't handle memory allocate failure so far */ io = kmem_cache_zalloc(log->io_kc, GFP_NOIO | __GFP_NOFAIL); io->log = log; - bio_list_init(&io->bios); INIT_LIST_HEAD(&io->log_sibling); INIT_LIST_HEAD(&io->stripe_list); io->state = IO_UNIT_RUNNING; @@ -312,7 +301,9 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) io->meta_offset = sizeof(struct r5l_meta_block); io->seq = log->seq++; - io->current_bio = r5l_bio_alloc(log, io); + io->current_bio = r5l_bio_alloc(log); + io->current_bio->bi_end_io = r5l_log_endio; + io->current_bio->bi_private = io; bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0); r5_reserve_log_entry(log, io); @@ -361,15 +352,18 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page) { struct r5l_io_unit *io = log->current_io; -alloc_bio: - if (!io->current_bio) - io->current_bio = r5l_bio_alloc(log, io); + if (io->need_split_bio) { + struct bio *prev = io->current_bio; - if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) { - io->current_bio = NULL; - goto alloc_bio; + io->current_bio = r5l_bio_alloc(log); + bio_chain(io->current_bio, prev); + + submit_bio(WRITE, prev); } + if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) + BUG(); + r5_reserve_log_entry(log, io); } -- cgit v1.2.3 From 9efdca16e0182eca489a519f576019fd9c0c1b25 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 12 Oct 2015 16:59:50 -0700 Subject: MD: fix info output for journal disk journal disk can be faulty. The Journal and Faulty aren't exclusive with each other. Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/md.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 5744829b7d05..e4e2731f7660 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5874,7 +5874,8 @@ static int get_disk_info(struct mddev *mddev, void __user * arg) else if (test_bit(In_sync, &rdev->flags)) { info.state |= (1<flags)) + } + if (test_bit(Journal, &rdev->flags)) info.state |= (1<flags)) info.state |= (1<bdev,b), rdev->desc_nr); if (test_bit(WriteMostly, &rdev->flags)) seq_printf(seq, "(W)"); + if (test_bit(Journal, &rdev->flags)) + seq_printf(seq, "(J)"); if (test_bit(Faulty, &rdev->flags)) { seq_printf(seq, "(F)"); continue; } - if (test_bit(Journal, &rdev->flags)) { - seq_printf(seq, "(J)"); - continue; - } if (rdev->raid_disk < 0) seq_printf(seq, "(S)"); /* spare */ if (test_bit(Replacement, &rdev->flags)) -- cgit v1.2.3 From 4b482044d24f3db2e11607d0a18f64b3a326972d Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 8 Oct 2015 21:54:06 -0700 Subject: raid5-cache: add trim support for log Since superblock is updated infrequently, we do a simple trim of log disk (a synchronous trim) Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 62 insertions(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index d3b5441b4c11..7071c7598f5d 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -85,6 +85,7 @@ struct r5l_log { spinlock_t no_space_stripes_lock; bool need_cache_flush; + bool in_teardown; }; /* @@ -644,6 +645,60 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log) } static void 
r5l_write_super(struct r5l_log *log, sector_t cp); +static void r5l_write_super_and_discard_space(struct r5l_log *log, + sector_t end) +{ + struct block_device *bdev = log->rdev->bdev; + struct mddev *mddev; + + r5l_write_super(log, end); + + if (!blk_queue_discard(bdev_get_queue(bdev))) + return; + + mddev = log->rdev->mddev; + /* + * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and + * wait for this thread to finish. This thread waits for + * MD_CHANGE_PENDING clear, which is supposed to be done in + * md_check_recovery(). md_check_recovery() tries to get + * reconfig_mutex. Since r5l_quiesce already holds the mutex, + * md_check_recovery() fails, so the PENDING never get cleared. The + * in_teardown check workaround this issue. + */ + if (!log->in_teardown) { + set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_CHANGE_PENDING, &mddev->flags); + md_wakeup_thread(mddev->thread); + wait_event(mddev->sb_wait, + !test_bit(MD_CHANGE_PENDING, &mddev->flags) || + log->in_teardown); + /* + * r5l_quiesce could run after in_teardown check and hold + * mutex first. Superblock might get updated twice. + */ + if (log->in_teardown) + md_update_sb(mddev, 1); + } else { + WARN_ON(!mddev_is_locked(mddev)); + md_update_sb(mddev, 1); + } + + if (log->last_checkpoint < end) { + blkdev_issue_discard(bdev, + log->last_checkpoint + log->rdev->data_offset, + end - log->last_checkpoint, GFP_NOIO, 0); + } else { + blkdev_issue_discard(bdev, + log->last_checkpoint + log->rdev->data_offset, + log->device_size - log->last_checkpoint, + GFP_NOIO, 0); + blkdev_issue_discard(bdev, log->rdev->data_offset, end, + GFP_NOIO, 0); + } +} + + static void r5l_do_reclaim(struct r5l_log *log) { sector_t reclaim_target = xchg(&log->reclaim_target, 0); @@ -685,7 +740,7 @@ static void r5l_do_reclaim(struct r5l_log *log) * here, because the log area might be reused soon and we don't want to * confuse recovery */ - r5l_write_super(log, next_checkpoint); + r5l_write_super_and_discard_space(log, next_checkpoint); mutex_lock(&log->io_mutex); log->last_checkpoint = next_checkpoint; @@ -721,9 +776,11 @@ static void r5l_wake_reclaim(struct r5l_log *log, sector_t space) void r5l_quiesce(struct r5l_log *log, int state) { + struct mddev *mddev; if (!log || state == 2) return; if (state == 0) { + log->in_teardown = 0; log->reclaim_thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev, "reclaim"); } else if (state == 1) { @@ -731,6 +788,10 @@ void r5l_quiesce(struct r5l_log *log, int state) * at this point all stripes are finished, so io_unit is at * least in STRIPE_END state */ + log->in_teardown = 1; + /* make sure r5l_write_super_and_discard_space exits */ + mddev = log->rdev->mddev; + wake_up(&mddev->sb_wait); r5l_wake_reclaim(log, -1L); md_unregister_thread(&log->reclaim_thread); r5l_do_reclaim(log); -- cgit v1.2.3 From c2bb6242ece5a2a0b6bd415c1d58babe83e971a8 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 8 Oct 2015 21:54:07 -0700 Subject: raid5: journal disk can't be removed raid5-cache uses journal disk rdev->bdev, rdev->mddev in several places. Don't allow journal disk disappear magically. On the other hand, we do need to update superblock for other disks to bump up ->events, so next time journal disk will be identified as stale. 
Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 216fa3c64287..693c000e739b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7128,6 +7128,15 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) struct disk_info *p = conf->disks + number; print_raid5_conf(conf); + if (test_bit(Journal, &rdev->flags)) { + /* + * journal disk is not removable, but we need give a chance to + * update superblock of other disks. Otherwise journal disk + * will be considered as 'fresh' + */ + set_bit(MD_CHANGE_DEVS, &mddev->flags); + return -EINVAL; + } if (rdev == p->rdev) rdevp = &p->rdev; else if (rdev == p->replacement) @@ -7190,6 +7199,8 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) int first = 0; int last = conf->raid_disks - 1; + if (test_bit(Journal, &rdev->flags)) + return -EINVAL; if (mddev->recovery_disabled == conf->recovery_disabled) return -EBUSY; -- cgit v1.2.3 From 6e74a9cfb5a55b0a4214809321b67d7065e55555 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 8 Oct 2015 21:54:08 -0700 Subject: raid5-cache: IO error handling There are 3 places the raid5-cache dispatches IO. The discard IO error doesn't matter, so we ignore it. The superblock write IO error can be handled in MD core. The remaining are log write and flush. When the IO error happens, we mark log disk faulty and fail all write IO. Read IO is still allowed to run. Userspace will get a notification too and corresponding daemon can choose setting raid array readonly for example. Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 15 ++++++++++++++- drivers/md/raid5.c | 4 +++- drivers/md/raid5.h | 2 ++ 3 files changed, 19 insertions(+), 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 7071c7598f5d..62e5fe4afae8 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -190,7 +190,6 @@ static void r5l_io_run_stripes(struct r5l_io_unit *io) } } -/* XXX: totally ignores I/O errors */ static void r5l_log_run_stripes(struct r5l_log *log) { struct r5l_io_unit *io, *next; @@ -213,6 +212,9 @@ static void r5l_log_endio(struct bio *bio) struct r5l_log *log = io->log; unsigned long flags; + if (bio->bi_error) + md_error(log->rdev->mddev, log->rdev); + bio_put(bio); spin_lock_irqsave(&log->io_list_lock, flags); @@ -598,6 +600,9 @@ static void r5l_log_flush_endio(struct bio *bio) unsigned long flags; struct r5l_io_unit *io; + if (bio->bi_error) + md_error(log->rdev->mddev, log->rdev); + spin_lock_irqsave(&log->io_list_lock, flags); list_for_each_entry(io, &log->flushing_ios, log_sibling) r5l_io_run_stripes(io); @@ -684,6 +689,7 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log, md_update_sb(mddev, 1); } + /* discard IO error really doesn't matter, ignore it */ if (log->last_checkpoint < end) { blkdev_issue_discard(bdev, log->last_checkpoint + log->rdev->data_offset, @@ -798,6 +804,13 @@ void r5l_quiesce(struct r5l_log *log, int state) } } +bool r5l_log_disk_error(struct r5conf *conf) +{ + if (!conf->log) + return false; + return test_bit(Faulty, &conf->log->rdev->flags); +} + struct r5l_recovery_ctx { struct page *meta_page; /* current meta */ sector_t meta_total_blocks; /* total size of current meta and data */ diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 693c000e739b..68c36ce4fe8e 100644 --- 
a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3147,6 +3147,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, * the data has not reached the cache yet. */ if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && + s->failed > conf->max_degraded && (!test_bit(R5_Insync, &sh->dev[i].flags) || test_bit(R5_ReadError, &sh->dev[i].flags))) { spin_lock_irq(&sh->stripe_lock); @@ -4015,6 +4016,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; s->failed_num[0] = -1; s->failed_num[1] = -1; + s->log_failed = r5l_log_disk_error(conf); /* Now to look around and see what can be done */ rcu_read_lock(); @@ -4358,7 +4360,7 @@ static void handle_stripe(struct stripe_head *sh) /* check if the array has lost more than max_degraded devices and, * if so, some requests might need to be failed. */ - if (s.failed > conf->max_degraded) { + if (s.failed > conf->max_degraded || s.log_failed) { sh->check_state = 0; sh->reconstruct_state = 0; break_stripe_batch_list(sh, 0); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 1ab534c909fe..a415e1cd39b8 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -272,6 +272,7 @@ struct stripe_head_state { struct bio_list return_bi; struct md_rdev *blocked_rdev; int handle_bad_blocks; + int log_failed; }; /* Flags for struct r5dev.flags */ @@ -631,4 +632,5 @@ extern void r5l_flush_stripe_to_raid(struct r5l_log *log); extern void r5l_stripe_write_finished(struct stripe_head *sh); extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio); extern void r5l_quiesce(struct r5l_log *log, int state); +extern bool r5l_log_disk_error(struct r5conf *conf); #endif -- cgit v1.2.3 From a97b7896447a89749d9258fbb9d8c3faf48a7a4e Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 8 Oct 2015 21:54:09 -0700 Subject: MD: add new bit to indicate raid array with journal If a raid array has journal feature bit set, add a new bit to indicate this. If the array is started without journal disk existing, we know there is something wrong. 
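The following patches build on this bit: if MD_HAS_JOURNAL is set but no member with the Journal flag is present at assembly time, the array is treated as having a failed log and is forced read-only.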
Signed-off-by: Song Liu Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/md.c | 10 +++++++--- drivers/md/md.h | 1 + 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index e4e2731f7660..bca859a6e3fd 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1667,6 +1667,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) set_bit(WriteMostly, &rdev->flags); if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT) set_bit(Replacement, &rdev->flags); + if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) + set_bit(MD_HAS_JOURNAL, &mddev->flags); } else /* MULTIPATH are always insync */ set_bit(In_sync, &rdev->flags); @@ -1807,16 +1809,18 @@ retry: for (i=0; idev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); + if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) + sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); + rdev_for_each(rdev2, mddev) { i = rdev2->desc_nr; if (test_bit(Faulty, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); else if (test_bit(In_sync, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); - else if (test_bit(Journal, &rdev2->flags)) { + else if (test_bit(Journal, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL); - sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); - } else if (rdev2->raid_disk >= 0) + else if (rdev2->raid_disk >= 0) sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); else sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); diff --git a/drivers/md/md.h b/drivers/md/md.h index e14e667a20e9..2bea51edfab7 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -233,6 +233,7 @@ struct mddev { * md_ioctl checked on it. */ #define MD_JOURNAL_CLEAN 5 /* A raid with journal is already clean */ +#define MD_HAS_JOURNAL 6 /* The raid array has journal feature set */ int suspended; atomic_t active_io; -- cgit v1.2.3 From 7dde2ad3c5b4afb4b2544b864fa34dd1f4897ab6 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 8 Oct 2015 21:54:10 -0700 Subject: raid5-cache: start raid5 readonly if journal is missing If raid array is expected to have journal (eg, journal is set in MD superblock feature map) and the array is started without journal disk, start the array readonly. 
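For example (the array name md0 is illustrative), assembling such an array without its journal device leaves it read-only and logs "md/raid:md0: journal disk is missing, force array readonly".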
Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5-cache.c | 3 ++- drivers/md/raid5.c | 7 +++++++ 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 62e5fe4afae8..b887e04d7e5c 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -806,8 +806,9 @@ void r5l_quiesce(struct r5l_log *log, int state) bool r5l_log_disk_error(struct r5conf *conf) { + /* don't allow write if journal disk is missing */ if (!conf->log) - return false; + return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags); return test_bit(Faulty, &conf->log->rdev->flags); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 68c36ce4fe8e..e1d18be67243 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6810,6 +6810,13 @@ static int run(struct mddev *mddev) if (IS_ERR(conf)) return PTR_ERR(conf); + if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) { + printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n", + mdname(mddev)); + mddev->ro = 1; + set_disk_ro(mddev->gendisk, 1); + } + conf->min_offset_diff = min_offset_diff; mddev->thread = conf->thread; conf->thread = NULL; -- cgit v1.2.3 From a3dfbdaadba2612faf11f025b8156c36e3700247 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 8 Oct 2015 21:54:11 -0700 Subject: MD: kick out journal disk if it's not fresh When the journal disk is faulty and we are reassembling the raid array, the journal disk is old (not fresh). We don't allow the old journal disk to be added to the raid array. Since the journal disk is then missing from the array, raid5 will mark the array read-only. Signed-off-by: Song Liu Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/md.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index bca859a6e3fd..f67cd5b68771 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1608,7 +1608,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) ++ev1; if (rdev->desc_nr >= 0 && rdev->desc_nr < le32_to_cpu(sb->max_dev) && - le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX) + (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || + le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)) if (ev1 < mddev->events) return -EINVAL; } else if (mddev->bitmap) { -- cgit v1.2.3 From f2076e7d0643d15b11db979acc7cffd2e8d69e77 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 8 Oct 2015 21:54:12 -0700 Subject: MD: set journal disk ->raid_disk Set the journal disk's ->raid_disk to >=0. I chose raid_disks + 1 instead of 0, because we already have a disk with ->raid_disk 0 and reusing it would cause a sysfs entry creation conflict. A lot of places assume a disk with ->raid_disk >=0 is a normal raid disk, so we add checks for the journal disk.
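As a side effect, the journal device's sysfs "slot" attribute now reports "journal" instead of "none", and writes to its "slot" and "size" attributes are rejected with -EBUSY.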
Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/md.c | 27 ++++++++++++++++++++++----- drivers/md/raid5.c | 6 ++++-- 2 files changed, 26 insertions(+), 7 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index f67cd5b68771..b5057596b630 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1650,6 +1650,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) rdev->journal_tail = le64_to_cpu(sb->journal_tail); if (mddev->recovery_cp == MaxSector) set_bit(MD_JOURNAL_CLEAN, &mddev->flags); + rdev->raid_disk = mddev->raid_disks; break; default: rdev->saved_raid_disk = role; @@ -1719,7 +1720,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); } - if (rdev->raid_disk >= 0 && + if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) && !test_bit(In_sync, &rdev->flags)) { sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); @@ -2304,6 +2305,7 @@ repeat: rdev_for_each(rdev, mddev) { if (rdev->raid_disk >= 0 && mddev->delta_disks >= 0 && + !test_bit(Journal, &rdev->flags) && !test_bit(In_sync, &rdev->flags) && mddev->curr_resync_completed > rdev->recovery_offset) rdev->recovery_offset = mddev->curr_resync_completed; @@ -2540,6 +2542,7 @@ state_show(struct md_rdev *rdev, char *page) sep = ","; } if (!test_bit(Faulty, &flags) && + !test_bit(Journal, &flags) && !test_bit(In_sync, &flags)) { len += sprintf(page+len, "%sspare", sep); sep = ","; @@ -2626,7 +2629,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { set_bit(In_sync, &rdev->flags); err = 0; - } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) { + } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 && + !test_bit(Journal, &rdev->flags)) { if (rdev->mddev->pers == NULL) { clear_bit(In_sync, &rdev->flags); rdev->saved_raid_disk = rdev->raid_disk; @@ -2645,6 +2649,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) * check if recovery is needed. 
*/ if (rdev->raid_disk >= 0 && + !test_bit(Journal, &rdev->flags) && !test_bit(Replacement, &rdev->flags)) set_bit(WantReplacement, &rdev->flags); set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); @@ -2722,7 +2727,9 @@ __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); static ssize_t slot_show(struct md_rdev *rdev, char *page) { - if (rdev->raid_disk < 0) + if (test_bit(Journal, &rdev->flags)) + return sprintf(page, "journal\n"); + else if (rdev->raid_disk < 0) return sprintf(page, "none\n"); else return sprintf(page, "%d\n", rdev->raid_disk); @@ -2734,6 +2741,8 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len) int slot; int err; + if (test_bit(Journal, &rdev->flags)) + return -EBUSY; if (strncmp(buf, "none", 4)==0) slot = -1; else { @@ -2932,6 +2941,8 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) sector_t oldsectors = rdev->sectors; sector_t sectors; + if (test_bit(Journal, &rdev->flags)) + return -EBUSY; if (strict_blocks_to_sectors(buf, §ors) < 0) return -EINVAL; if (rdev->data_offset != rdev->new_data_offset) @@ -3294,7 +3305,9 @@ static void analyze_sbs(struct mddev *mddev) rdev->desc_nr = i++; rdev->raid_disk = rdev->desc_nr; set_bit(In_sync, &rdev->flags); - } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) { + } else if (rdev->raid_disk >= + (mddev->raid_disks - min(0, mddev->delta_disks)) && + !test_bit(Journal, &rdev->flags)) { rdev->raid_disk = -1; clear_bit(In_sync, &rdev->flags); } @@ -7825,6 +7838,7 @@ void md_do_sync(struct md_thread *thread) rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) if (rdev->raid_disk >= 0 && + !test_bit(Journal, &rdev->flags) && !test_bit(Faulty, &rdev->flags) && !test_bit(In_sync, &rdev->flags) && rdev->recovery_offset < j) @@ -8050,6 +8064,7 @@ void md_do_sync(struct md_thread *thread) rdev_for_each_rcu(rdev, mddev) if (rdev->raid_disk >= 0 && mddev->delta_disks >= 0 && + !test_bit(Journal, &rdev->flags) && !test_bit(Faulty, &rdev->flags) && !test_bit(In_sync, &rdev->flags) && rdev->recovery_offset < mddev->curr_resync) @@ -8095,7 +8110,8 @@ static int remove_and_add_spares(struct mddev *mddev, rdev->raid_disk >= 0 && !test_bit(Blocked, &rdev->flags) && (test_bit(Faulty, &rdev->flags) || - ! 
test_bit(In_sync, &rdev->flags)) && + (!test_bit(In_sync, &rdev->flags) && + !test_bit(Journal, &rdev->flags))) && atomic_read(&rdev->nr_pending)==0) { if (mddev->pers->hot_remove_disk( mddev, rdev) == 0) { @@ -8117,6 +8133,7 @@ static int remove_and_add_spares(struct mddev *mddev, continue; if (rdev->raid_disk >= 0 && !test_bit(In_sync, &rdev->flags) && + !test_bit(Journal, &rdev->flags) && !test_bit(Faulty, &rdev->flags)) spares++; if (rdev->raid_disk >= 0) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e1d18be67243..e5befa356dbe 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6560,7 +6560,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) rdev_for_each(rdev, mddev) { raid_disk = rdev->raid_disk; if (raid_disk >= max_disks - || raid_disk < 0) + || raid_disk < 0 || test_bit(Journal, &rdev->flags)) continue; disk = conf->disks + raid_disk; @@ -6694,8 +6694,10 @@ static int run(struct mddev *mddev) rdev_for_each(rdev, mddev) { long long diff; - if (test_bit(Journal, &rdev->flags)) + if (test_bit(Journal, &rdev->flags)) { journal_dev = rdev; + continue; + } if (rdev->raid_disk < 0) continue; diff = (rdev->new_data_offset - rdev->data_offset); -- cgit v1.2.3 From 339421def582abb14c2217aa8c8f28bb2e299174 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 8 Oct 2015 21:54:13 -0700 Subject: MD: when RAID journal is missing/faulty, block RESTART_ARRAY_RW When RAID-4/5/6 array suffers from missing journal device, we put the array in read only state. We should not allow trasition to read-write states (clean and active) before replacing journal device. Signed-off-by: Song Liu Signed-off-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/md.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index b5057596b630..08a4034351de 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3970,7 +3970,9 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) break; case clean: if (mddev->pers) { - restart_array(mddev); + err = restart_array(mddev); + if (err) + break; spin_lock(&mddev->lock); if (atomic_read(&mddev->writes_pending) == 0) { if (mddev->in_sync == 0) { @@ -3988,7 +3990,9 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) break; case active: if (mddev->pers) { - restart_array(mddev); + err = restart_array(mddev); + if (err) + break; clear_bit(MD_CHANGE_PENDING, &mddev->flags); wake_up(&mddev->sb_wait); err = 0; @@ -5351,6 +5355,25 @@ static int restart_array(struct mddev *mddev) return -EINVAL; if (!mddev->ro) return -EBUSY; + if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { + struct md_rdev *rdev; + bool has_journal = false; + + rcu_read_lock(); + rdev_for_each_rcu(rdev, mddev) { + if (test_bit(Journal, &rdev->flags) && + !test_bit(Faulty, &rdev->flags)) { + has_journal = true; + break; + } + } + rcu_read_unlock(); + + /* Don't restart rw with journal missing/faulty */ + if (!has_journal) + return -EINVAL; + } + mddev->safemode = 0; mddev->ro = 0; set_disk_ro(disk, 0); -- cgit v1.2.3
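As an illustration (the array name md0 is hypothetical), writing "clean" or "active" to /sys/block/md0/md/array_state while the journal device is missing or faulty now fails with EINVAL instead of switching the array back to read-write.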