summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorAndrey Konovalov <andreyknvl@google.com>2023-02-11 00:16:01 +0300
committerAndrew Morton <akpm@linux-foundation.org>2023-02-17 07:43:51 +0300
commitcd0fc64e76844758b78d0fd376ae3ca4fd802049 (patch)
tree784b5d5893215d53e3bd3193e6a0ce82bb94a32a /lib
parent514d5c557b8b590a80f0569af5ae5f4d455ecef2 (diff)
downloadlinux-cd0fc64e76844758b78d0fd376ae3ca4fd802049.tar.xz
lib/stackdepot: annotate depot_init_pool and depot_alloc_stack
Clean up the existing comments and add new ones to depot_init_pool and depot_alloc_stack. As a part of the clean-up, remove mentions of which variable is accessed by smp_store_release and smp_load_acquire: it is clear as is from the code. Link: https://lkml.kernel.org/r/f80b02951364e6b40deda965b4003de0cd1a532d.1676063693.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov <andreyknvl@google.com> Reviewed-by: Alexander Potapenko <glider@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'lib')
-rw-r--r--lib/stackdepot.c34
1 files changed, 24 insertions, 10 deletions
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index d4d988276b91..c4bc198c3d93 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -218,32 +218,39 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(stack_depot_init);
+/* Uses preallocated memory to initialize a new stack depot pool. */
static void depot_init_pool(void **prealloc)
{
/*
- * This smp_load_acquire() pairs with smp_store_release() to
- * |next_pool_inited| below and in depot_alloc_stack().
+ * smp_load_acquire() here pairs with smp_store_release() below and
+ * in depot_alloc_stack().
*/
if (smp_load_acquire(&next_pool_inited))
return;
+
+ /* Check if the current pool is not yet allocated. */
if (stack_pools[pool_index] == NULL) {
+ /* Use the preallocated memory for the current pool. */
stack_pools[pool_index] = *prealloc;
*prealloc = NULL;
} else {
- /* If this is the last depot pool, do not touch the next one. */
+ /*
+ * Otherwise, use the preallocated memory for the next pool
+ * as long as we do not exceed the maximum number of pools.
+ */
if (pool_index + 1 < DEPOT_MAX_POOLS) {
stack_pools[pool_index + 1] = *prealloc;
*prealloc = NULL;
}
/*
- * This smp_store_release pairs with smp_load_acquire() from
- * |next_pool_inited| above and in stack_depot_save().
+ * This smp_store_release pairs with smp_load_acquire() above
+ * and in stack_depot_save().
*/
smp_store_release(&next_pool_inited, 1);
}
}
-/* Allocation of a new stack in raw storage */
+/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
@@ -252,28 +259,35 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN);
+ /* Check if there is not enough space in the current pool. */
if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
+ /* Bail out if we reached the pool limit. */
if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
WARN_ONCE(1, "Stack depot reached limit capacity");
return NULL;
}
+
+ /* Move on to the next pool. */
pool_index++;
pool_offset = 0;
/*
- * smp_store_release() here pairs with smp_load_acquire() from
- * |next_pool_inited| in stack_depot_save() and
- * depot_init_pool().
+ * smp_store_release() here pairs with smp_load_acquire() in
+ * stack_depot_save() and depot_init_pool().
*/
if (pool_index + 1 < DEPOT_MAX_POOLS)
smp_store_release(&next_pool_inited, 0);
}
+
+ /* Assign the preallocated memory to a pool if required. */
if (*prealloc)
depot_init_pool(prealloc);
+
+ /* Check if we have a pool to save the stack trace. */
if (stack_pools[pool_index] == NULL)
return NULL;
+ /* Save the stack trace. */
stack = stack_pools[pool_index] + pool_offset;
-
stack->hash = hash;
stack->size = size;
stack->handle.pool_index = pool_index;