author     Alexei Starovoitov <ast@kernel.org>      2019-02-01 02:40:05 +0300
committer  Daniel Borkmann <daniel@iogearbox.net>   2019-02-01 22:55:38 +0300
commit     e16d2f1ab96849b4b65e64b82550a7ecdbf405eb (patch)
tree       c5bb1bfee577712f7a0c1312716b2fb573560c74 /kernel/bpf
parent     d83525ca62cf8ebe3271d14c36fb900c294274a2 (diff)
download   linux-e16d2f1ab96849b4b65e64b82550a7ecdbf405eb.tar.xz
bpf: add support for bpf_spin_lock to cgroup local storage
Allow 'struct bpf_spin_lock' to reside inside cgroup local storage.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
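For illustration, the kind of map value this enables is sketched below; the struct and field names (storage_val, counter) are hypothetical and not part of this commit. The matching map declaration and program side are sketched after the syscall.c and verifier.c hunks further down.

/* Hypothetical value layout for a cgroup local storage map; illustration
 * only. struct bpf_spin_lock comes from the uapi header and was introduced
 * by the parent commit (d83525ca62cf).
 */
#include <linux/bpf.h>

struct storage_val {
        struct bpf_spin_lock lock;      /* protects 'counter' */
        __u64 counter;
};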
Diffstat (limited to 'kernel/bpf')
-rw-r--r--   kernel/bpf/local_storage.c   2
-rw-r--r--   kernel/bpf/syscall.c         3
-rw-r--r--   kernel/bpf/verifier.c        2
3 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 07a34ef562a0..0295427f06e2 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -147,6 +147,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
                 return -ENOMEM;
 
         memcpy(&new->data[0], value, map->value_size);
+        check_and_init_map_lock(map, new->data);
 
         new = xchg(&storage->buf, new);
         kfree_rcu(new, rcu);
@@ -483,6 +484,7 @@ struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
                 storage->buf = kmalloc_node(size, flags, map->numa_node);
                 if (!storage->buf)
                         goto enomem;
+                check_and_init_map_lock(map, storage->buf->data);
         } else {
                 storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
                 if (!storage->percpu_buf)
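Both new call sites rely on check_and_init_map_lock(), a helper added to include/linux/bpf.h by the parent commit (d83525ca62cf). Roughly, it does the following: if the map's value type embeds a bpf_spin_lock, the lock word at map->spin_lock_off is forced back to the unlocked (all-zero) state, so neither user-supplied bytes from an update nor a freshly allocated per-cgroup buffer can ever present an already-locked lock to a program. A sketch of that behaviour:

/* Sketch of the helper's behaviour; see include/linux/bpf.h from the
 * parent commit for the actual definition.
 */
static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
        if (likely(!map_value_has_spin_lock(map)))
                return;
        *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
                (struct bpf_spin_lock){};
}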
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ebf0a673cb83..b29e6dc44650 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -482,7 +482,8 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
 
         if (map_value_has_spin_lock(map)) {
                 if (map->map_type != BPF_MAP_TYPE_HASH &&
-                    map->map_type != BPF_MAP_TYPE_ARRAY)
+                    map->map_type != BPF_MAP_TYPE_ARRAY &&
+                    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE)
                         return -ENOTSUPP;
                 if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
                     map->value_size) {
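With BPF_MAP_TYPE_CGROUP_STORAGE added to this whitelist, a cgroup local storage map whose BTF-described value contains a bpf_spin_lock is now accepted at map creation time; a map type outside the whitelist, e.g. BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, still fails with -ENOTSUPP. A declaration sketch, continuing the hypothetical storage_val layout above and assuming libbpf's BTF-defined map syntax (the map name cg_storage is made up):

#include <bpf/bpf_helpers.h>

/* Sketch only: shared cgroup local storage keyed by
 * struct bpf_cgroup_storage_key, with a spin-lock-protected value.
 */
struct {
        __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
        __type(key, struct bpf_cgroup_storage_key);
        __type(value, struct storage_val);
} cg_storage SEC(".maps");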
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 38892bdee651..b63bc77af2d1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3089,6 +3089,8 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                 regs[BPF_REG_0].map_ptr = meta.map_ptr;
                 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
                         regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
+                        if (map_value_has_spin_lock(meta.map_ptr))
+                                regs[BPF_REG_0].id = ++env->id_gen;
                 } else {
                         regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
                         regs[BPF_REG_0].id = ++env->id_gen;
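The case this covers is a helper whose return type is RET_PTR_TO_MAP_VALUE, such as bpf_get_local_storage(): the returned pointer is guaranteed non-NULL, but without an id on the register the verifier could not match a later bpf_spin_unlock() to the pointer the lock was taken on. A program-side sketch, continuing the hypothetical cg_storage map and storage_val layout above:

/* Illustration only; assumes the sketches above and a cgroup/skb attach
 * point. Counts bytes per cgroup under the value's spin lock.
 */
SEC("cgroup/skb")
int count_bytes(struct __sk_buff *skb)
{
        struct storage_val *val;
        __u32 len = skb->len;   /* read the ctx before taking the lock */

        /* Never NULL (RET_PTR_TO_MAP_VALUE); with this hunk the register
         * still receives an id because the value holds a bpf_spin_lock.
         */
        val = bpf_get_local_storage(&cg_storage, 0);

        bpf_spin_lock(&val->lock);
        val->counter += len;
        bpf_spin_unlock(&val->lock);

        return 1;       /* allow the packet */
}

char _license[] SEC("license") = "GPL";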