author     Hou Tao <houtao1@huawei.com>    2023-10-20 16:31:57 +0300
committer  Alexei Starovoitov <ast@kernel.org>    2023-10-21 00:15:06 +0300
commit     b460bc8302f222d346f0c15bba980eb8c36d6278 (patch)
tree       73bd8effe56c8dcae3d9bfd166ff6772d5abb604 /mm
parent     394e6869f0185e89cb815db29bf819474df858ae (diff)
mm/percpu.c: introduce pcpu_alloc_size()
Introduce pcpu_alloc_size() to get the size of a dynamic per-cpu area. It will be used by the bpf memory allocator in the following patches.

The bpf memory allocator maintains per-cpu area caches for multiple area sizes, and its free API is passed only the to-be-freed per-cpu pointer, so it needs the size of the dynamic per-cpu area to select the corresponding cache when a bpf program frees a dynamic per-cpu pointer.

Acked-by: Dennis Zhou <dennis@kernel.org>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20231020133202.4043247-3-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
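The follow-up patches wire this into the bpf memory allocator. Purely as an illustration, here is a minimal sketch of how a free path that only receives the per-cpu pointer could use the new helper to pick a size-class cache; cache_free() and size_to_cache_index() are hypothetical stand-ins, not the actual bpf_mem_alloc code:

/* Illustrative only: cache_free()/size_to_cache_index() are hypothetical. */
static void example_free_percpu(void __percpu *ptr)
{
	size_t size;

	if (!ptr)
		return;

	/* Recover the allocation size from the per-cpu area itself ... */
	size = pcpu_alloc_size(ptr);
	/* ... and use it to select the matching size-class cache. */
	cache_free(size_to_cache_index(size), ptr);
}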
Diffstat (limited to 'mm')
-rw-r--r--    mm/percpu.c    31
1 file changed, 31 insertions(+), 0 deletions(-)
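To make the arithmetic of the new helper (added in the diff below) concrete: each bit of chunk->bound_map covers PCPU_MIN_ALLOC_SIZE bytes, and the bit at an allocation's start is set, as is the bit just past its end, so the distance from the allocation's own bit to the next set bit gives its size in PCPU_MIN_ALLOC_SIZE units. A stand-alone user-space sketch of that calculation with made-up numbers (PCPU_MIN_ALLOC_SIZE is assumed to be 4 bytes here):

/* Stand-alone illustration of the bound_map arithmetic; not kernel code. */
#include <stdio.h>

#define PCPU_MIN_ALLOC_SIZE 4	/* bytes tracked per bound_map bit (assumed) */

int main(void)
{
	/*
	 * Suppose the queried allocation starts at bit 10 and the next
	 * boundary bit set in the chunk's bound_map is bit 18.
	 */
	unsigned long bit_off = 10, end = 18;

	/* Same formula the helper returns: span in bits times bytes per bit. */
	printf("size = %lu bytes\n", (end - bit_off) * PCPU_MIN_ALLOC_SIZE);	/* 32 */
	return 0;
}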
diff --git a/mm/percpu.c b/mm/percpu.c
index ea607078368d..60ed078e4cd0 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2245,6 +2245,37 @@ static void pcpu_balance_workfn(struct work_struct *work)
}

/**
+ * pcpu_alloc_size - the size of the dynamic percpu area
+ * @ptr: pointer to the dynamic percpu area
+ *
+ * Returns the size of the @ptr allocation. This is undefined for statically
+ * defined percpu variables as there is no corresponding chunk->bound_map.
+ *
+ * RETURNS:
+ * The size of the dynamic percpu area.
+ *
+ * CONTEXT:
+ * Can be called from atomic context.
+ */
+size_t pcpu_alloc_size(void __percpu *ptr)
+{
+	struct pcpu_chunk *chunk;
+	unsigned long bit_off, end;
+	void *addr;
+
+	if (!ptr)
+		return 0;
+
+	addr = __pcpu_ptr_to_addr(ptr);
+	/* No pcpu_lock here: ptr has not been freed, so chunk is still alive */
+	chunk = pcpu_chunk_addr_search(addr);
+	bit_off = (addr - chunk->base_addr) / PCPU_MIN_ALLOC_SIZE;
+	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
+			    bit_off + 1);
+	return (end - bit_off) * PCPU_MIN_ALLOC_SIZE;
+}
+
+/**
* free_percpu - free percpu area
* @ptr: pointer to area to free
*