-rw-r--r--   fs/bcachefs/buckets.c          6
-rw-r--r--   fs/bcachefs/buckets_types.h    2
-rw-r--r--   fs/bcachefs/io.c               3
-rw-r--r--   fs/bcachefs/move.c             6
-rw-r--r--   fs/bcachefs/move.h             3
-rw-r--r--   fs/bcachefs/movinggc.c        61
-rw-r--r--   fs/bcachefs/rebalance.c        1
7 files changed, 50 insertions, 32 deletions
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 2277143b1890..7558e2bffbdd 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -979,6 +979,9 @@ static int bucket_set_stripe(struct bch_fs *c, struct bkey_s_c k,
char buf[200];
int ret;
+ if (enabled)
+ g->ec_redundancy = s->nr_redundant;
+
old = bucket_cmpxchg(g, new, ({
ret = check_bucket_ref(c, k, ptr, 0, 0, new.gen, new.data_type,
new.dirty_sectors, new.cached_sectors);
@@ -1010,6 +1013,9 @@ static int bucket_set_stripe(struct bch_fs *c, struct bkey_s_c k,
}
}));
+ if (!enabled)
+ g->ec_redundancy = 0;
+
bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
return 0;
}
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 26779e94a189..9364addf8441 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -41,6 +41,7 @@ struct bucket {
u8 oldest_gen;
u8 gc_gen;
unsigned gen_valid:1;
+ u8 ec_redundancy;
};
struct bucket_array {
@@ -123,6 +124,7 @@ struct disk_reservation {
struct copygc_heap_entry {
u8 dev;
u8 gen;
+ u8 replicas;
u16 fragmentation;
u32 sectors;
u64 offset;
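
Note: the two hunks above introduce a per-bucket ec_redundancy counter (set when a stripe covering the bucket is enabled in bucket_set_stripe(), cleared when it is disabled) and a matching replicas field in the copygc heap entry. A minimal, self-contained sketch of the intended relationship, using simplified stand-in structs rather than the kernel's real ones:

#include <stdio.h>

/* Simplified stand-ins for struct bucket / struct copygc_heap_entry. */
struct bucket {
	unsigned char ec_redundancy;	/* parity blocks in the bucket's stripe, 0 if none */
};

struct copygc_heap_entry {
	unsigned char replicas;		/* data copy plus parity blocks to rewrite */
	unsigned sectors;		/* live sectors in the bucket */
};

/* Mirrors the bucket_set_stripe() change above: remember the stripe's
 * redundancy while it is enabled, drop back to 0 when it is torn down. */
static void bucket_set_stripe(struct bucket *g, unsigned nr_redundant, int enabled)
{
	g->ec_redundancy = enabled ? nr_redundant : 0;
}

int main(void)
{
	struct bucket g = { 0 };

	bucket_set_stripe(&g, 2, 1);	/* e.g. a 4+2 stripe */

	/* copygc later charges one data copy plus the stripe's parity: */
	struct copygc_heap_entry e = {
		.replicas = 1 + g.ec_redundancy,
		.sectors  = 128,
	};

	printf("weighted sectors to move: %u\n", e.sectors * e.replicas);
	return 0;
}
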
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 78adccbee9d9..5c12bfed3a7b 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -1466,7 +1466,8 @@ static struct promote_op *__promote_alloc(struct bch_fs *c,
opts,
DATA_PROMOTE,
(struct data_opts) {
- .target = opts.promote_target
+ .target = opts.promote_target,
+ .nr_replicas = 1,
},
btree_id, k);
BUG_ON(ret);
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 8aa13b41d20d..9d190ae4f391 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -265,8 +265,8 @@ int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
BCH_WRITE_DATA_ENCODED|
BCH_WRITE_FROM_INTERNAL;
- m->op.nr_replicas = 1;
- m->op.nr_replicas_required = 1;
+ m->op.nr_replicas = data_opts.nr_replicas;
+ m->op.nr_replicas_required = data_opts.nr_replicas;
m->op.index_update_fn = bch2_migrate_index_update;
switch (data_cmd) {
@@ -754,6 +754,7 @@ static enum data_cmd rereplicate_pred(struct bch_fs *c, void *arg,
return DATA_SKIP;
data_opts->target = 0;
+ data_opts->nr_replicas = 1;
data_opts->btree_insert_flags = 0;
return DATA_ADD_REPLICAS;
}
@@ -769,6 +770,7 @@ static enum data_cmd migrate_pred(struct bch_fs *c, void *arg,
return DATA_SKIP;
data_opts->target = 0;
+ data_opts->nr_replicas = 1;
data_opts->btree_insert_flags = 0;
data_opts->rewrite_dev = op->migrate.dev;
return DATA_REWRITE;
diff --git a/fs/bcachefs/move.h b/fs/bcachefs/move.h
index 0acd1720d4f8..b04bc669226d 100644
--- a/fs/bcachefs/move.h
+++ b/fs/bcachefs/move.h
@@ -20,7 +20,8 @@ enum data_cmd {
struct data_opts {
u16 target;
- unsigned rewrite_dev;
+ u8 rewrite_dev;
+ u8 nr_replicas;
int btree_insert_flags;
};
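
Note: with nr_replicas added to struct data_opts, each move predicate now decides how many replicas a rewrite should produce, and bch2_migrate_write_init() copies that into the write op instead of hard-coding 1 (rereplicate, migrate, promote and rebalance all ask for a single replica). A rough, self-contained sketch of that flow; the field names mirror the patch, but the surrounding code is invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Cut-down versions of the structures touched by this patch. */
struct data_opts {
	uint16_t target;
	uint8_t  rewrite_dev;
	uint8_t  nr_replicas;
	int      btree_insert_flags;
};

struct write_op {
	unsigned nr_replicas;
	unsigned nr_replicas_required;
};

/* Analogue of rereplicate_pred()/rebalance_pred(): plain rewrites want one replica. */
static void plain_rewrite_pred(struct data_opts *opts)
{
	opts->target = 0;
	opts->nr_replicas = 1;
	opts->btree_insert_flags = 0;
}

/* Analogue of bch2_migrate_write_init(): the op inherits the predicate's choice. */
static void migrate_write_init(struct write_op *op, const struct data_opts *opts)
{
	op->nr_replicas = opts->nr_replicas;
	op->nr_replicas_required = opts->nr_replicas;
}

int main(void)
{
	struct data_opts opts = { 0 };
	struct write_op op = { 0 };

	plain_rewrite_pred(&opts);
	migrate_write_init(&op, &opts);

	printf("op.nr_replicas = %u\n", op.nr_replicas);
	return 0;
}
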
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 5f96f619bee0..e858e2a35f8d 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -53,17 +53,21 @@ static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
cmp_int(l->offset, r->offset);
}
-static int __copygc_pred(struct bch_fs *c, struct bkey_s_c k)
+static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
+ struct bkey_s_c k,
+ struct bch_io_opts *io_opts,
+ struct data_opts *data_opts)
{
copygc_heap *h = &c->copygc_heap;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct copygc_heap_entry search = {
- .dev = ptr->dev,
- .offset = ptr->offset
+ .dev = p.ptr.dev,
+ .offset = p.ptr.offset,
};
ssize_t i = eytzinger0_find_le(h->data, h->used,
@@ -81,27 +85,24 @@ static int __copygc_pred(struct bch_fs *c, struct bkey_s_c k)
BUG_ON(i != j);
#endif
if (i >= 0 &&
- ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
- ptr->gen == h->data[i].gen)
- return ptr->dev;
- }
+ p.ptr.offset < h->data[i].offset + ca->mi.bucket_size &&
+ p.ptr.gen == h->data[i].gen) {
+ data_opts->target = io_opts->background_target;
+ data_opts->nr_replicas = 1;
+ data_opts->btree_insert_flags = BTREE_INSERT_USE_RESERVE;
+ data_opts->rewrite_dev = p.ptr.dev;
- return -1;
-}
+ if (p.has_ec) {
+ struct stripe *m = genradix_ptr(&c->stripes[0], p.ec.idx);
-static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
- struct bkey_s_c k,
- struct bch_io_opts *io_opts,
- struct data_opts *data_opts)
-{
- int dev_idx = __copygc_pred(c, k);
- if (dev_idx < 0)
- return DATA_SKIP;
-
- data_opts->target = io_opts->background_target;
- data_opts->btree_insert_flags = BTREE_INSERT_USE_RESERVE;
- data_opts->rewrite_dev = dev_idx;
- return DATA_REWRITE;
+ data_opts->nr_replicas += m->nr_redundant;
+ }
+
+ return DATA_REWRITE;
+ }
+ }
+
+ return DATA_SKIP;
}
static bool have_copygc_reserve(struct bch_dev *ca)
@@ -168,7 +169,8 @@ static int bch2_copygc(struct bch_fs *c)
buckets = bucket_array(ca);
for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
- struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
+ struct bucket *g = buckets->b + b;
+ struct bucket_mark m = READ_ONCE(g->mark);
struct copygc_heap_entry e;
if (m.owned_by_allocator ||
@@ -177,9 +179,12 @@ static int bch2_copygc(struct bch_fs *c)
bucket_sectors_used(m) >= ca->mi.bucket_size)
continue;
+ WARN_ON(m.stripe && !g->ec_redundancy);
+
e = (struct copygc_heap_entry) {
.dev = dev_idx,
.gen = m.gen,
+ .replicas = 1 + g->ec_redundancy,
.fragmentation = bucket_sectors_used(m) * (1U << 15)
/ ca->mi.bucket_size,
.sectors = bucket_sectors_used(m),
@@ -196,11 +201,11 @@ static int bch2_copygc(struct bch_fs *c)
}
for (i = h->data; i < h->data + h->used; i++)
- sectors_to_move += i->sectors;
+ sectors_to_move += i->sectors * i->replicas;
while (sectors_to_move > sectors_reserved) {
BUG_ON(!heap_pop(h, e, -fragmentation_cmp, NULL));
- sectors_to_move -= e.sectors;
+ sectors_to_move -= e.sectors * e.replicas;
}
buckets_to_move = h->used;
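
Note: the net effect in copygc is twofold. An extent found in an erasure-coded bucket is rewritten with 1 + nr_redundant replicas (copygc_pred above), and sectors_to_move is weighted by each heap entry's replicas so the reserve check also accounts for the parity that will be written. A simplified, self-contained sketch of that accounting; the heap is replaced here by a plain array assumed sorted from most to least fragmented, where the real code pops entries via heap_pop():

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct copygc_heap_entry {
	uint8_t  replicas;	/* 1 + ec_redundancy of the bucket */
	uint32_t sectors;	/* live sectors to rewrite */
};

int main(void)
{
	/* Candidate buckets, most fragmented first. */
	struct copygc_heap_entry h[] = {
		{ .replicas = 1, .sectors = 100 },	/* plain bucket */
		{ .replicas = 3, .sectors = 100 },	/* bucket in a stripe with 2 parity blocks */
		{ .replicas = 1, .sectors = 200 },
	};
	size_t used = sizeof(h) / sizeof(h[0]);
	uint64_t sectors_reserved = 400;
	uint64_t sectors_to_move = 0;

	/* As in bch2_copygc(): each bucket now costs sectors * replicas. */
	for (size_t i = 0; i < used; i++)
		sectors_to_move += (uint64_t)h[i].sectors * h[i].replicas;

	/* Drop the least-fragmented candidates (here: the tail) until we fit the reserve. */
	while (sectors_to_move > sectors_reserved && used) {
		used--;
		sectors_to_move -= (uint64_t)h[used].sectors * h[used].replicas;
	}

	printf("moving %zu buckets, %llu weighted sectors\n",
	       used, (unsigned long long)sectors_to_move);
	return 0;
}
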
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index a0bbddeac623..cce6f58fe609 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -73,6 +73,7 @@ static enum data_cmd rebalance_pred(struct bch_fs *c, void *arg,
{
if (__bch2_rebalance_pred(c, k, io_opts) >= 0) {
data_opts->target = io_opts->background_target;
+ data_opts->nr_replicas = 1;
data_opts->btree_insert_flags = 0;
return DATA_ADD_REPLICAS;
} else {