author     Jason A. Donenfeld <Jason@zx2c4.com>  2022-11-30 05:02:05 +0300
committer  Jason A. Donenfeld <Jason@zx2c4.com>  2022-12-04 16:37:08 +0300
commit     39ec9e6b141e5a9d7274f40531888f890385a013 (patch)
tree       3a08982293fe2b6380b160dda771d328419478c3 /drivers/char/random.c
parent     b83e45fd065c3cfdb8cc0179bbddf296ce4d4fda (diff)
random: align entropy_timer_state to cache line
The theory behind the jitter dance is that multiple things are poking at the same cache line. This only works, however, if what's being poked at is actually all in the same cache line. Ensure this is the case by aligning the struct on the stack to the cache line size.

We can't use ____cacheline_aligned on a stack variable, because gcc assumes 16 byte alignment when only 8 byte alignment is provided by the kernel, which means gcc could technically do something pathological like `(rsp & ~48) - 64`. It doesn't, but rather than risk it, just do the stack alignment manually with PTR_ALIGN and an oversized buffer.

Fixes: 50ee7529ec45 ("random: try to actively add entropy rather than passively wait for it")
Cc: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
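As a quick illustration of the pattern the patch switches to, here is a minimal sketch of manual stack alignment with PTR_ALIGN and an oversized buffer. This is not the kernel code itself: "struct jitter_state", its fields, and "example()" are illustrative stand-ins, not the real entropy_timer_state layout (header locations may also differ between kernel versions).

#include <linux/align.h>   /* PTR_ALIGN(); in older kernels it lives in linux/kernel.h */
#include <linux/cache.h>   /* SMP_CACHE_BYTES */
#include <linux/atomic.h>

/* Illustrative stand-in for a small per-call state struct. */
struct jitter_state {
	unsigned long entropy;
	atomic_t samples;
};

static void example(void)
{
	/* Room for the struct plus the worst-case misalignment of the stack slot. */
	u8 raw[sizeof(struct jitter_state) + SMP_CACHE_BYTES - 1];
	/* Round the pointer up to the next cache-line boundary inside the buffer. */
	struct jitter_state *state = PTR_ALIGN((void *)raw, SMP_CACHE_BYTES);

	state->entropy = 0;
	atomic_set(&state->samples, 0);
	/* All further accesses go through state->, as the diff below converts
	 * stack. to stack-> */
}

As long as the struct itself is no larger than SMP_CACHE_BYTES, this guarantees every field lands in the same cache line, which is what the jitter dance relies on.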
Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--  drivers/char/random.c  33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 36d29da31af0..e872acc1238f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1265,29 +1265,30 @@ static void __cold entropy_timer(struct timer_list *timer)
static void __cold try_to_generate_entropy(void)
{
enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
- struct entropy_timer_state stack;
+ u8 stack_bytes[sizeof(struct entropy_timer_state) + SMP_CACHE_BYTES - 1];
+ struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
unsigned int i, num_different = 0;
unsigned long last = random_get_entropy();
int cpu = -1;
for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
- stack.entropy = random_get_entropy();
- if (stack.entropy != last)
+ stack->entropy = random_get_entropy();
+ if (stack->entropy != last)
++num_different;
- last = stack.entropy;
+ last = stack->entropy;
}
- stack.samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
- if (stack.samples_per_bit > MAX_SAMPLES_PER_BIT)
+ stack->samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
+ if (stack->samples_per_bit > MAX_SAMPLES_PER_BIT)
return;
- atomic_set(&stack.samples, 0);
- timer_setup_on_stack(&stack.timer, entropy_timer, 0);
+ atomic_set(&stack->samples, 0);
+ timer_setup_on_stack(&stack->timer, entropy_timer, 0);
while (!crng_ready() && !signal_pending(current)) {
/*
* Check !timer_pending() and then ensure that any previous callback has finished
* executing by checking try_to_del_timer_sync(), before queueing the next one.
*/
- if (!timer_pending(&stack.timer) && try_to_del_timer_sync(&stack.timer) >= 0) {
+ if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
struct cpumask timer_cpus;
unsigned int num_cpus;
@@ -1314,20 +1315,20 @@ static void __cold try_to_generate_entropy(void)
} while (cpu == smp_processor_id() && num_cpus > 1);
/* Expiring the timer at `jiffies` means it's the next tick. */
- stack.timer.expires = jiffies;
+ stack->timer.expires = jiffies;
- add_timer_on(&stack.timer, cpu);
+ add_timer_on(&stack->timer, cpu);
preempt_enable();
}
- mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
+ mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
schedule();
- stack.entropy = random_get_entropy();
+ stack->entropy = random_get_entropy();
}
- mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
+ mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
- del_timer_sync(&stack.timer);
- destroy_timer_on_stack(&stack.timer);
+ del_timer_sync(&stack->timer);
+ destroy_timer_on_stack(&stack->timer);
}