author     Kent Overstreet <kent.overstreet@linux.dev>   2023-09-10 03:10:11 +0300
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-10-23 00:10:12 +0300
commit     5cfd69775eb5460ef78bb5034a37eb0dc52ab65d (patch)
tree       26a5e49f14d153acbf243df39a75e38ffdc9f3d6
parent     a9a7bbab1469f0c427f90c309720c543e37ab110 (diff)
download   linux-5cfd69775eb5460ef78bb5034a37eb0dc52ab65d.tar.xz
bcachefs: Array bounds fixes
It's no longer legal to use a zero size array as a flexible array member - this
causes UBSAN to complain. This patch switches our zero size arrays to normal
flexible array members when possible, and inserts casts in other places (e.g.
where we use the zero size array as a marker partway through an array).

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
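For illustration, here is a minimal userspace sketch of the two patterns the patch applies. The struct and function names (var_blob, packed_key, packed_key_next) are hypothetical, not the real definitions in fs/bcachefs/bcachefs_format.h and fs/bcachefs/bkey.h, and the sketch relies on the GNU C zero-length-array extension just as the kernel code does:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pattern 1: a trailing zero-size array becomes a true flexible array member. */
struct var_blob {
	uint16_t nr;		/* number of bytes in data[] */
	uint8_t  data[];	/* was: uint8_t data[0]; */
};

/*
 * Pattern 2: a zero-size array kept as a marker partway through the struct.
 * It cannot become a flexible array member (it is not the last member), so
 * pointer arithmetic on it now goes through a cast to a plain pointer, so
 * UBSAN's array-bounds check doesn't flag arithmetic past a zero-element
 * array.
 */
struct packed_key {
	uint64_t _data[0];	/* marker: the whole key viewed as u64 words */
	uint8_t  u64s;		/* total size of the key, in u64 words */
	uint8_t  type;
	uint8_t  bytes[];	/* remaining packed bytes */
};

static uint64_t *packed_key_next(struct packed_key *k)
{
	/* was: return k->_data + k->u64s; */
	return (uint64_t *) k->_data + k->u64s;
}

int main(void)
{
	struct var_blob *b = malloc(sizeof(*b) + 4);
	struct packed_key *k = calloc(1, 2 * sizeof(uint64_t));

	b->nr = 4;
	memcpy(b->data, "abcd", b->nr);
	k->u64s = 2;	/* key occupies two u64 words in total */

	printf("blob holds %u bytes; next key starts %td u64s past _data\n",
	       (unsigned) b->nr, packed_key_next(k) - (uint64_t *) k->_data);

	free(b);
	free(k);
	return 0;
}

The cast in packed_key_next has the same shape as the casts added in bkey.c, bkey.h, bset.c and vstructs.h below.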
-rw-r--r--   fs/bcachefs/bcachefs_format.h   66
-rw-r--r--   fs/bcachefs/bkey.c               2
-rw-r--r--   fs/bcachefs/bkey.h               6
-rw-r--r--   fs/bcachefs/bkey_sort.h         16
-rw-r--r--   fs/bcachefs/bset.c              13
-rw-r--r--   fs/bcachefs/btree_io.c          21
-rw-r--r--   fs/bcachefs/vstructs.h           6
7 files changed, 64 insertions, 66 deletions
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index f17238be494c..1cce2504bca6 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -723,7 +723,7 @@ struct bch_inode {
__le64 bi_hash_seed;
__le32 bi_flags;
__le16 bi_mode;
- __u8 fields[0];
+ __u8 fields[];
} __packed __aligned(8);
struct bch_inode_v2 {
@@ -733,7 +733,7 @@ struct bch_inode_v2 {
__le64 bi_hash_seed;
__le64 bi_flags;
__le16 bi_mode;
- __u8 fields[0];
+ __u8 fields[];
} __packed __aligned(8);
struct bch_inode_v3 {
@@ -745,7 +745,7 @@ struct bch_inode_v3 {
__le64 bi_sectors;
__le64 bi_size;
__le64 bi_version;
- __u8 fields[0];
+ __u8 fields[];
} __packed __aligned(8);
#define INODEv3_FIELDS_START_INITIAL 6
@@ -1097,20 +1097,20 @@ struct bch_reflink_v {
struct bch_val v;
__le64 refcount;
union bch_extent_entry start[0];
- __u64 _data[0];
+ __u64 _data[];
} __packed __aligned(8);
struct bch_indirect_inline_data {
struct bch_val v;
__le64 refcount;
- u8 data[0];
+ u8 data[];
};
/* Inline data */
struct bch_inline_data {
struct bch_val v;
- u8 data[0];
+ u8 data[];
};
/* Subvolumes: */
@@ -1223,7 +1223,7 @@ enum bch_sb_field_type {
struct bch_sb_field_journal {
struct bch_sb_field field;
- __le64 buckets[0];
+ __le64 buckets[];
};
struct bch_sb_field_journal_v2 {
@@ -1232,7 +1232,7 @@ struct bch_sb_field_journal_v2 {
struct bch_sb_field_journal_v2_entry {
__le64 start;
__le64 nr;
- } d[0];
+ } d[];
};
/* BCH_SB_FIELD_members: */
@@ -1279,7 +1279,7 @@ enum bch_member_state {
struct bch_sb_field_members {
struct bch_sb_field field;
- struct bch_member members[0];
+ struct bch_member members[];
};
/* BCH_SB_FIELD_crypt: */
@@ -1377,19 +1377,19 @@ static inline bool data_type_is_hidden(enum bch_data_type type)
struct bch_replicas_entry_v0 {
__u8 data_type;
__u8 nr_devs;
- __u8 devs[0];
+ __u8 devs[];
} __packed;
struct bch_sb_field_replicas_v0 {
struct bch_sb_field field;
- struct bch_replicas_entry_v0 entries[0];
+ struct bch_replicas_entry_v0 entries[];
} __packed __aligned(8);
struct bch_replicas_entry {
__u8 data_type;
__u8 nr_devs;
__u8 nr_required;
- __u8 devs[0];
+ __u8 devs[];
} __packed;
#define replicas_entry_bytes(_i) \
@@ -1397,7 +1397,7 @@ struct bch_replicas_entry {
struct bch_sb_field_replicas {
struct bch_sb_field field;
- struct bch_replicas_entry entries[0];
+ struct bch_replicas_entry entries[];
} __packed __aligned(8);
/* BCH_SB_FIELD_quota: */
@@ -1432,7 +1432,7 @@ LE64_BITMASK(BCH_GROUP_PARENT, struct bch_disk_group, flags[0], 6, 24)
struct bch_sb_field_disk_groups {
struct bch_sb_field field;
- struct bch_disk_group entries[0];
+ struct bch_disk_group entries[];
} __packed __aligned(8);
/* BCH_SB_FIELD_counters */
@@ -1525,7 +1525,7 @@ enum bch_persistent_counters {
struct bch_sb_field_counters {
struct bch_sb_field field;
- __le64 d[0];
+ __le64 d[];
};
/*
@@ -1539,10 +1539,8 @@ struct jset_entry {
__u8 type; /* designates what this jset holds */
__u8 pad[3];
- union {
- struct bkey_i start[0];
- __u64 _data[0];
- };
+ struct bkey_i start[0];
+ __u64 _data[];
};
struct bch_sb_field_clean {
@@ -1553,10 +1551,8 @@ struct bch_sb_field_clean {
__le16 _write_clock;
__le64 journal_seq;
- union {
- struct jset_entry start[0];
- __u64 _data[0];
- };
+ struct jset_entry start[0];
+ __u64 _data[];
};
struct journal_seq_blacklist_entry {
@@ -1567,10 +1563,8 @@ struct journal_seq_blacklist_entry {
struct bch_sb_field_journal_seq_blacklist {
struct bch_sb_field field;
- union {
- struct journal_seq_blacklist_entry start[0];
- __u64 _data[0];
- };
+ struct journal_seq_blacklist_entry start[0];
+ __u64 _data[];
};
/* Superblock: */
@@ -1706,10 +1700,8 @@ struct bch_sb {
struct bch_sb_layout layout;
- union {
- struct bch_sb_field start[0];
- __le64 _data[0];
- };
+ struct bch_sb_field start[0];
+ __le64 _data[];
} __packed __aligned(8);
/*
@@ -2186,10 +2178,8 @@ struct jset {
__le64 last_seq;
- union {
- struct jset_entry start[0];
- __u64 _data[0];
- };
+ struct jset_entry start[0];
+ __u64 _data[];
} __packed __aligned(8);
LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
@@ -2294,10 +2284,8 @@ struct bset {
__le16 version;
__le16 u64s; /* count of d[] in u64s */
- union {
- struct bkey_packed start[0];
- __u64 _data[0];
- };
+ struct bkey_packed start[0];
+ __u64 _data[];
} __packed __aligned(8);
LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c
index 0a5bfe6e9a2d..a3abd9d2d176 100644
--- a/fs/bcachefs/bkey.c
+++ b/fs/bcachefs/bkey.c
@@ -127,7 +127,7 @@ static void pack_state_finish(struct pack_state *state,
struct bkey_packed *k)
{
EBUG_ON(state->p < k->_data);
- EBUG_ON(state->p >= k->_data + state->format->key_u64s);
+ EBUG_ON(state->p >= (u64 *) k->_data + state->format->key_u64s);
*state->p = state->w;
}
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index 51969a46265e..518450209236 100644
--- a/fs/bcachefs/bkey.h
+++ b/fs/bcachefs/bkey.h
@@ -52,7 +52,7 @@ struct bkey_s {
static inline struct bkey_i *bkey_next(struct bkey_i *k)
{
- return (struct bkey_i *) (k->_data + k->k.u64s);
+ return (struct bkey_i *) ((u64 *) k->_data + k->k.u64s);
}
#define bkey_val_u64s(_k) ((_k)->u64s - BKEY_U64s)
@@ -397,7 +397,7 @@ static inline void set_bkeyp_val_u64s(const struct bkey_format *format,
}
#define bkeyp_val(_format, _k) \
- ((struct bch_val *) ((_k)->_data + bkeyp_key_u64s(_format, _k)))
+ ((struct bch_val *) ((u64 *) (_k)->_data + bkeyp_key_u64s(_format, _k)))
extern const struct bkey_format bch2_bkey_format_current;
@@ -732,7 +732,7 @@ static inline unsigned high_word_offset(const struct bkey_format *f)
#error edit for your odd byteorder.
#endif
-#define high_word(f, k) ((k)->_data + high_word_offset(f))
+#define high_word(f, k) ((u64 *) (k)->_data + high_word_offset(f))
#define next_word(p) nth_word(p, 1)
#define prev_word(p) nth_word(p, -1)
diff --git a/fs/bcachefs/bkey_sort.h b/fs/bcachefs/bkey_sort.h
index 79cf11d1b4e7..7c0f0b160f18 100644
--- a/fs/bcachefs/bkey_sort.h
+++ b/fs/bcachefs/bkey_sort.h
@@ -9,14 +9,24 @@ struct sort_iter {
struct sort_iter_set {
struct bkey_packed *k, *end;
- } data[MAX_BSETS + 1];
+ } data[];
};
-static inline void sort_iter_init(struct sort_iter *iter, struct btree *b)
+static inline void sort_iter_init(struct sort_iter *iter, struct btree *b, unsigned size)
{
iter->b = b;
iter->used = 0;
- iter->size = ARRAY_SIZE(iter->data);
+ iter->size = size;
+}
+
+struct sort_iter_stack {
+ struct sort_iter iter;
+ struct sort_iter_set sets[MAX_BSETS + 1];
+};
+
+static inline void sort_iter_stack_init(struct sort_iter_stack *iter, struct btree *b)
+{
+ sort_iter_init(&iter->iter, b, ARRAY_SIZE(iter->sets));
}
static inline void sort_iter_add(struct sort_iter *iter,
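The bkey_sort.h change above has a knock-on effect: once data[] is a flexible array member, struct sort_iter no longer carries its own MAX_BSETS + 1 slots, so on-stack users wrap it in struct sort_iter_stack, which pairs the iterator with fixed backing storage and passes the capacity to sort_iter_init(). A rough userspace sketch of that idiom, with hypothetical names (fixed_vec / fixed_vec_stack standing in for sort_iter / sort_iter_stack):

#include <stddef.h>
#include <stdio.h>

struct fixed_vec {
	size_t used, size;
	int    data[];		/* flexible array member: no inline storage */
};

static void fixed_vec_init(struct fixed_vec *v, size_t size)
{
	v->used = 0;
	v->size = size;
}

/*
 * On-stack wrapper: the flexible-array struct followed immediately by a
 * fixed-size backing array, mirroring struct sort_iter_stack in the patch.
 * Embedding a struct with a flexible array member is a GNU C extension.
 */
struct fixed_vec_stack {
	struct fixed_vec vec;
	int              slots[8];
};

static void fixed_vec_stack_init(struct fixed_vec_stack *s)
{
	fixed_vec_init(&s->vec, sizeof(s->slots) / sizeof(s->slots[0]));
}

int main(void)
{
	struct fixed_vec_stack s;

	fixed_vec_stack_init(&s);
	for (size_t i = 0; i < s.vec.size; i++) {
		s.vec.data[i] = (int) i;	/* writes land in s.slots[] */
		s.vec.used++;
	}
	printf("filled %zu of %zu slots\n", s.vec.used, s.vec.size);
	return 0;
}

As in the kernel code, the sketch assumes the wrapper lays out the fixed array immediately after the flexible-array struct, which GCC and Clang accept as an extension.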
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index bcdf28f39b9c..685792137d2a 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -232,7 +232,7 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
{
struct bset_tree *t = bch2_bkey_to_bset(b, where);
struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
- struct bkey_packed *next = (void *) (where->_data + clobber_u64s);
+ struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s);
struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
#if 0
@@ -300,7 +300,8 @@ static unsigned bkey_float_byte_offset(unsigned idx)
}
struct ro_aux_tree {
- struct bkey_float f[0];
+ u8 nothing[0];
+ struct bkey_float f[];
};
struct rw_aux_tree {
@@ -476,7 +477,7 @@ static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
{
unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
- return (void *) (tree_to_bkey(b, t, j)->_data - prev_u64s);
+ return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s);
}
static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
@@ -1010,8 +1011,8 @@ void bch2_bset_insert(struct btree *b,
btree_keys_account_key_add(&b->nr, t - b->set, src);
if (src->u64s != clobber_u64s) {
- u64 *src_p = where->_data + clobber_u64s;
- u64 *dst_p = where->_data + src->u64s;
+ u64 *src_p = (u64 *) where->_data + clobber_u64s;
+ u64 *dst_p = (u64 *) where->_data + src->u64s;
EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
(int) clobber_u64s - src->u64s);
@@ -1037,7 +1038,7 @@ void bch2_bset_delete(struct btree *b,
unsigned clobber_u64s)
{
struct bset_tree *t = bset_tree_last(b);
- u64 *src_p = where->_data + clobber_u64s;
+ u64 *src_p = (u64 *) where->_data + clobber_u64s;
u64 *dst_p = where->_data;
bch2_bset_verify_rw_aux_tree(b, t);
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index cba3c081b1d0..0edbb73a5ec8 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -292,7 +292,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
bool filter_whiteouts)
{
struct btree_node *out;
- struct sort_iter sort_iter;
+ struct sort_iter_stack sort_iter;
struct bset_tree *t;
struct bset *start_bset = bset(b, &b->set[start_idx]);
bool used_mempool = false;
@@ -301,13 +301,13 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
bool sorting_entire_node = start_idx == 0 &&
end_idx == b->nsets;
- sort_iter_init(&sort_iter, b);
+ sort_iter_stack_init(&sort_iter, b);
for (t = b->set + start_idx;
t < b->set + end_idx;
t++) {
u64s += le16_to_cpu(bset(b, t)->u64s);
- sort_iter_add(&sort_iter,
+ sort_iter_add(&sort_iter.iter,
btree_bkey_first(b, t),
btree_bkey_last(b, t));
}
@@ -320,7 +320,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
start_time = local_clock();
- u64s = bch2_sort_keys(out->keys.start, &sort_iter, filter_whiteouts);
+ u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter, filter_whiteouts);
out->keys.u64s = cpu_to_le16(u64s);
@@ -918,8 +918,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
b->written = 0;
iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
- sort_iter_init(iter, b);
- iter->size = (btree_blocks(c) + 1) * 2;
+ sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
if (bch2_meta_read_fault("btree"))
btree_err(-BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
@@ -1852,7 +1851,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
struct bset *i;
struct btree_node *bn = NULL;
struct btree_node_entry *bne = NULL;
- struct sort_iter sort_iter;
+ struct sort_iter_stack sort_iter;
struct nonce nonce;
unsigned bytes_to_write, sectors_to_write, bytes, u64s;
u64 seq = 0;
@@ -1925,7 +1924,7 @@ do_write:
bch2_sort_whiteouts(c, b);
- sort_iter_init(&sort_iter, b);
+ sort_iter_stack_init(&sort_iter, b);
bytes = !b->written
? sizeof(struct btree_node)
@@ -1940,7 +1939,7 @@ do_write:
continue;
bytes += le16_to_cpu(i->u64s) * sizeof(u64);
- sort_iter_add(&sort_iter,
+ sort_iter_add(&sort_iter.iter,
btree_bkey_first(b, t),
btree_bkey_last(b, t));
seq = max(seq, le64_to_cpu(i->journal_seq));
@@ -1969,14 +1968,14 @@ do_write:
i->journal_seq = cpu_to_le64(seq);
i->u64s = 0;
- sort_iter_add(&sort_iter,
+ sort_iter_add(&sort_iter.iter,
unwritten_whiteouts_start(c, b),
unwritten_whiteouts_end(c, b));
SET_BSET_SEPARATE_WHITEOUTS(i, false);
b->whiteout_u64s = 0;
- u64s = bch2_sort_keys(i->start, &sort_iter, false);
+ u64s = bch2_sort_keys(i->start, &sort_iter.iter, false);
le16_add_cpu(&i->u64s, u64s);
BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
diff --git a/fs/bcachefs/vstructs.h b/fs/bcachefs/vstructs.h
index 53a694d71967..a6561b4b36a6 100644
--- a/fs/bcachefs/vstructs.h
+++ b/fs/bcachefs/vstructs.h
@@ -41,11 +41,11 @@
(round_up(vstruct_bytes(_s), 512 << (_sector_block_bits)) >> 9)
#define vstruct_next(_s) \
- ((typeof(_s)) ((_s)->_data + __vstruct_u64s(_s)))
+ ((typeof(_s)) ((u64 *) (_s)->_data + __vstruct_u64s(_s)))
#define vstruct_last(_s) \
- ((typeof(&(_s)->start[0])) ((_s)->_data + __vstruct_u64s(_s)))
+ ((typeof(&(_s)->start[0])) ((u64 *) (_s)->_data + __vstruct_u64s(_s)))
#define vstruct_end(_s) \
- ((void *) ((_s)->_data + __vstruct_u64s(_s)))
+ ((void *) ((u64 *) (_s)->_data + __vstruct_u64s(_s)))
#define vstruct_for_each(_s, _i) \
for (_i = (_s)->start; \