author     Kent Overstreet <kent.overstreet@gmail.com>   2020-12-15 05:59:33 +0300
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-10-23 00:08:50 +0300
commit     35a067b42dcfd884fb132128ae94f240c6511fea (patch)
tree       39ec0fe89492d3322900902ae40f3dacd54d8291 /fs/bcachefs/extents.c
parent     3187aa8d57025f60f1b8f9e14b6fc33f5e2d2960 (diff)
download   linux-35a067b42dcfd884fb132128ae94f240c6511fea.tar.xz
bcachefs: Change when we allow overwrites
Originally, we'd check for -ENOSPC when getting a disk reservation whenever the new extent took up more space on disk than the old extent.

Erasure coding screwed this up, because with erasure coding writes are initially replicated, and then in the background the extra replicas are dropped when the stripe is created. This means that with erasure coding enabled, writes will always take up more space on disk than the data they're overwriting - but, according to POSIX, overwrites aren't supposed to return ENOSPC.

So, in this patch we fudge things: if the new extent has more replicas than the _effective_ replicas of the old extent, or if the old extent is compressed and the new one isn't, we check for ENOSPC when getting the disk reservation - otherwise, we don't.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
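The rule described above reduces to a per-extent test against the extents being overwritten. Below is a rough sketch of that test in plain C - simplified names and types, not the actual bcachefs code: old_effective_replicas stands in for what bch2_bkey_replicas() computes (non-cached pointers plus stripe redundancy), and old_compressed for bch2_bkey_sectors_compressed(k) != 0.

/*
 * Simplified illustration of the rule above: an overwrite must go through
 * the -ENOSPC check only if it wants more copies than the old extent
 * effectively has, or if it would replace compressed data with
 * uncompressed data (which can grow the on-disk footprint).
 */
#include <stdbool.h>

static bool overwrite_must_check_enospc(unsigned old_effective_replicas,
					bool old_compressed,
					unsigned new_replicas,
					bool new_compressed)
{
	/* New data wants more copies than are effectively allocated. */
	if (new_replicas > old_effective_replicas)
		return true;

	/* Uncompressed data replacing compressed data can take more space. */
	if (!new_compressed && old_compressed)
		return true;

	/* Otherwise the overwrite cannot need more space than it frees. */
	return false;
}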
Diffstat (limited to 'fs/bcachefs/extents.c')
-rw-r--r--   fs/bcachefs/extents.c   32
1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 7cdfd09d797e..a924cc66b4d0 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -664,7 +664,7 @@ bool bch2_bkey_is_incompressible(struct bkey_s_c k)
 }
 
 bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
-				unsigned nr_replicas)
+				unsigned nr_replicas, bool compressed)
 {
 	struct btree_trans trans;
 	struct btree_iter *iter;
@@ -682,7 +682,8 @@ bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
 		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
 			break;
 
-		if (nr_replicas > bch2_bkey_nr_ptrs_fully_allocated(k)) {
+		if (nr_replicas > bch2_bkey_replicas(c, k) ||
+		    (!compressed && bch2_bkey_sectors_compressed(k))) {
 			ret = false;
 			break;
 		}
@@ -692,6 +693,33 @@ bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
 	return ret;
 }
 
+unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+	const union bch_extent_entry *entry;
+	struct extent_ptr_decoded p;
+	unsigned replicas = 0;
+
+	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+		if (p.ptr.cached)
+			continue;
+
+		if (p.has_ec) {
+			struct stripe *s =
+				genradix_ptr(&c->stripes[0], p.ec.idx);
+
+			WARN_ON(!s);
+			if (s)
+				replicas += s->nr_redundant;
+		}
+
+		replicas++;
+
+	}
+
+	return replicas;
+}
+
 static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
 					   struct extent_ptr_decoded p)
 {
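The caller side of this change lives outside extents.c and is not shown in this diff, which is limited to the one file. As a hedged sketch of how a write path might use the updated bch2_check_range_allocated() - the variable names res, sectors, pos and opts are illustrative, not taken from this patch, and the bch2_disk_reservation_get() call is shown only as the kind of reservation attempt the check is meant to back up:

/*
 * Illustrative only: a failed disk reservation is tolerated when the range
 * being overwritten is already fully allocated with enough effective
 * replicas and the write is not uncompressed data over compressed data.
 */
ret = bch2_disk_reservation_get(c, &res, sectors, opts.data_replicas, 0);
if (ret &&
    !bch2_check_range_allocated(c, pos, sectors,
				opts.data_replicas,
				opts.compression != 0))
	goto err;	/* genuinely out of space */

/* pure overwrite of already-allocated space: proceed without the check */
ret = 0;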