author     David S. Miller <davem@davemloft.net>  2019-06-22 15:59:24 +0300
committer  David S. Miller <davem@davemloft.net>  2019-06-22 15:59:24 +0300
commit     92ad6325cb891bb455487bfe90cc47d18aa6ec37 (patch)
tree       433a7ef938fae69789216043f67eff9f9c6b0c68 /drivers/md/md.c
parent     e0effb5fbd56a8b2b8917611cbf4fcd9aba92b8f (diff)
parent     c356dc4b540edd6c02b409dd8cf3208ba2804c38 (diff)
download   linux-92ad6325cb891bb455487bfe90cc47d18aa6ec37.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Minor SPDX change conflict.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--  drivers/md/md.c  36
1 file changed, 22 insertions, 14 deletions
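
The hunks below replace an unchecked unsigned subtraction with a guarded one when computing db. As a minimal userspace sketch (not kernel code; the variable values are hypothetical and chosen only for illustration), the following shows how the removed expression can wrap to a huge value whenever recovery_active + resync_mark_cnt momentarily exceeds curr_mark_cnt, which the added check avoids:

#include <stdio.h>

int main(void)
{
	/* Hypothetical counts: more resync I/O still in flight than has
	 * completed since the last mark. */
	unsigned long curr_mark_cnt = 100, resync_mark_cnt = 60;
	int recovery_active = 50;

	/* Old expression: unsigned subtraction wraps to a huge value. */
	unsigned long old_db = curr_mark_cnt - recovery_active - resync_mark_cnt;

	/* New expression: db stays 0 instead of underflowing. */
	unsigned long new_db = 0;

	if (curr_mark_cnt >= (unsigned long)recovery_active + resync_mark_cnt)
		new_db = curr_mark_cnt - (recovery_active + resync_mark_cnt);

	printf("old db = %lu, new db = %lu\n", old_db, new_db);
	return 0;
}
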
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 04f4f131f9d6..9801d540fea1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7607,9 +7607,9 @@ static void status_unused(struct seq_file *seq)
static int status_resync(struct seq_file *seq, struct mddev *mddev)
{
sector_t max_sectors, resync, res;
- unsigned long dt, db;
- sector_t rt;
- int scale;
+ unsigned long dt, db = 0;
+ sector_t rt, curr_mark_cnt, resync_mark_cnt;
+ int scale, recovery_active;
unsigned int per_milli;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -7698,22 +7698,30 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
* db: blocks written from mark until now
* rt: remaining time
*
- * rt is a sector_t, so could be 32bit or 64bit.
- * So we divide before multiply in case it is 32bit and close
- * to the limit.
- * We scale the divisor (db) by 32 to avoid losing precision
- * near the end of resync when the number of remaining sectors
- * is close to 'db'.
- * We then divide rt by 32 after multiplying by db to compensate.
- * The '+1' avoids division by zero if db is very small.
+ * rt is a sector_t, which is always 64bit now. We are keeping
+ * the original algorithm, but it is not really necessary.
+ *
+ * Original algorithm:
+ * So we divide before multiply in case it is 32bit and close
+ * to the limit.
+ * We scale the divisor (db) by 32 to avoid losing precision
+ * near the end of resync when the number of remaining sectors
+ * is close to 'db'.
+ * We then divide rt by 32 after multiplying by db to compensate.
+ * The '+1' avoids division by zero if db is very small.
*/
dt = ((jiffies - mddev->resync_mark) / HZ);
if (!dt) dt++;
- db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
- - mddev->resync_mark_cnt;
+
+ curr_mark_cnt = mddev->curr_mark_cnt;
+ recovery_active = atomic_read(&mddev->recovery_active);
+ resync_mark_cnt = mddev->resync_mark_cnt;
+
+ if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
+ db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
rt = max_sectors - resync; /* number of remaining sectors */
- sector_div(rt, db/32+1);
+ rt = div64_u64(rt, db/32+1);
rt *= dt;
rt >>= 5;
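
To make the arithmetic in this hunk easier to follow, here is a minimal userspace sketch of the same remaining-time estimate. It is not kernel code: the function name, parameter layout, typedef, and the example values in main() are invented for illustration, and div64_u64() is modeled by plain 64-bit division.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;	/* stand-in for the kernel's sector_t */

/* Estimate remaining resync time in seconds (userspace model of the hunk). */
static u64 estimate_resync_seconds(unsigned long dt, u64 curr_mark_cnt,
				   u64 resync_mark_cnt, int recovery_active,
				   u64 max_sectors, u64 resync)
{
	unsigned long db = 0;
	u64 rt;

	if (!dt)
		dt = 1;				/* mirrors "if (!dt) dt++;" */

	/* Only subtract when the result cannot go negative. */
	if (curr_mark_cnt >= recovery_active + resync_mark_cnt)
		db = curr_mark_cnt - (recovery_active + resync_mark_cnt);

	rt = max_sectors - resync;		/* remaining sectors */
	rt = rt / (db / 32 + 1);		/* divide first; '+1' avoids div-by-zero */
	rt *= dt;
	rt >>= 5;				/* undo the scaling of db by 32 */
	return rt;
}

int main(void)
{
	/* Roughly 32768 sectors/s observed over 10 s, 2097152 sectors left. */
	u64 eta = estimate_resync_seconds(10, 331776, 0, 4096, 4194304, 2097152);

	printf("estimated time remaining: %llu s\n", (unsigned long long)eta);
	return 0;
}

With these inputs the sketch prints an estimate of 63 seconds, close to the exact 2097152 / 32768 = 64 seconds; the small difference comes from the integer division and the '+1' term.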