author    Linus Torvalds <torvalds@linux-foundation.org>  2019-09-29 02:53:52 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-09-30 03:38:52 +0300
commit    50ee7529ec4500c88f8664560770a7a1b65db72b (patch)
tree      122866c9cd0b4e2425309dc4696c728682434ee1
parent    4d856f72c10ecb060868ed10ff1b1453943fc6c8 (diff)
download  linux-50ee7529ec4500c88f8664560770a7a1b65db72b.tar.xz
random: try to actively add entropy rather than passively wait for it
For 5.3 we had to revert a nice ext4 IO pattern improvement, because it caused a bootup regression due to lack of entropy at bootup together with arguably broken user space that was asking for secure random numbers when it really didn't need to. See commit 72dbcf721566 (Revert "ext4: make __ext4_get_inode_loc plug").

This aims to solve the issue by actively generating entropy noise using the CPU cycle counter when waiting for the random number generator to initialize. This only works when you have a high-frequency time stamp counter available, but that's the case on all modern x86 CPUs, and on most other modern CPUs too.

What we do is to generate jitter entropy from the CPU cycle counter under a somewhat complex load: calling the scheduler while also guaranteeing a certain amount of timing noise by also triggering a timer.

I'm sure we can tweak this, and that people will want to look at other alternatives, but there have been a number of papers written on jitter entropy, and this should really be fairly conservative by crediting one bit of entropy for every timer-induced jump in the cycle counter. Not because the timer itself would be all that unpredictable, but because the interaction between the timer and the loop is going to be.

Even if (and perhaps particularly if) the timer actually happens on another CPU, the cacheline interaction between the loop that reads the cycle counter and the timer itself firing is going to add perturbations to the cycle counter values that get mixed into the entropy pool.

As Thomas pointed out, with a modern out-of-order CPU, even quite simple loops show a fair amount of hard-to-predict timing variability even in the absence of external interrupts. But this tries to take that further by actually having a fairly complex interaction.

This is not going to solve the entropy issue for architectures that have no CPU cycle counter, but it's not clear how (and if) that is solvable, and the hardware in question is largely starting to be irrelevant. And by doing this we can at least avoid some of the even more contentious approaches (like making the entropy waiting time out in order to avoid the possibly unbounded waiting).

Cc: Ahmed Darwish <darwish.07@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Nicholas Mc Guire <hofrat@opentech.at>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Willy Tarreau <w@1wt.eu>
Cc: Alexander E. Patrakov <patrakov@gmail.com>
Cc: Lennart Poettering <mzxreary@0pointer.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
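As a rough illustration of the idea (not part of the patch below), here is a hypothetical userspace C sketch that folds cycle-counter deltas taken around a scheduling point into a running value; __rdtsc() and sched_yield() stand in for the kernel's random_get_entropy() and schedule(), and the folding step is only a crude stand-in for mix_pool_bytes():

/* Hypothetical userspace illustration (x86 only) of cycle-counter jitter.
 * This is not the kernel change below; it only shows that timestamp deltas
 * taken around a scheduling point vary unpredictably from run to run. */
#include <stdio.h>
#include <stdint.h>
#include <sched.h>
#include <x86intrin.h>			/* __rdtsc() */

int main(void)
{
	uint64_t pool = 0;

	for (int i = 0; i < 16; i++) {
		uint64_t before = __rdtsc();

		sched_yield();			/* stand-in for schedule() */

		uint64_t delta = __rdtsc() - before;

		/* crude stand-in for mix_pool_bytes(): fold the delta in */
		pool = (pool << 7) ^ (pool >> 57) ^ delta;
		printf("delta %2d: %llu cycles\n", i, (unsigned long long)delta);
	}
	printf("folded value: %016llx\n", (unsigned long long)pool);
	return 0;
}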
-rw-r--r--  drivers/char/random.c  62
1 file changed, 61 insertions(+), 1 deletion(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 5d5ea4ce1442..2fda6166c1dd 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1731,6 +1731,56 @@ void get_random_bytes(void *buf, int nbytes)
}
EXPORT_SYMBOL(get_random_bytes);
+
+/*
+ * Each time the timer fires, we expect that we got an unpredictable
+ * jump in the cycle counter. Even if the timer is running on another
+ * CPU, the timer activity will be touching the stack of the CPU that is
+ * generating entropy.
+ *
+ * Note that we don't re-arm the timer in the timer itself - we are
+ * happy to be scheduled away, since that just makes the load more
+ * complex, but we do not want the timer to keep ticking unless the
+ * entropy loop is running.
+ *
+ * So the re-arming always happens in the entropy loop itself.
+ */
+static void entropy_timer(struct timer_list *t)
+{
+ credit_entropy_bits(&input_pool, 1);
+}
+
+/*
+ * If we have an actual cycle counter, see if we can
+ * generate enough entropy with timing noise
+ */
+static void try_to_generate_entropy(void)
+{
+ struct {
+ unsigned long now;
+ struct timer_list timer;
+ } stack;
+
+ stack.now = random_get_entropy();
+
+ /* Slow counter - or none. Don't even bother */
+ if (stack.now == random_get_entropy())
+ return;
+
+ timer_setup_on_stack(&stack.timer, entropy_timer, 0);
+ while (!crng_ready()) {
+ if (!timer_pending(&stack.timer))
+ mod_timer(&stack.timer, jiffies+1);
+ mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+ schedule();
+ stack.now = random_get_entropy();
+ }
+
+ del_timer_sync(&stack.timer);
+ destroy_timer_on_stack(&stack.timer);
+ mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+}
+
/*
* Wait for the urandom pool to be seeded and thus guaranteed to supply
* cryptographically secure random numbers. This applies to: the /dev/urandom
@@ -1745,7 +1795,17 @@ int wait_for_random_bytes(void)
{
if (likely(crng_ready()))
return 0;
- return wait_event_interruptible(crng_init_wait, crng_ready());
+
+ do {
+ int ret;
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ if (ret)
+ return ret > 0 ? 0 : ret;
+
+ try_to_generate_entropy();
+ } while (!crng_ready());
+
+ return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);
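
For context, a hedged sketch of how an in-kernel consumer would use this path after the change; wait_for_random_bytes() and get_random_bytes() are the real exports touched above, while example_get_seed() is an illustrative name, not something added by this patch:

/* Hypothetical caller: wait for the CRNG (now with the jitter fallback above)
 * to initialize, then draw key material.  example_get_seed() is illustrative
 * only and is not part of this commit. */
static int example_get_seed(u8 *key, int nbytes)
{
	int ret = wait_for_random_bytes();	/* may return -ERESTARTSYS on a signal */

	if (ret)
		return ret;

	get_random_bytes(key, nbytes);		/* CRNG is guaranteed seeded here */
	return 0;
}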