author     Linus Torvalds <torvalds@linux-foundation.org>  2020-04-05 20:59:18 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-04-05 20:59:18 +0300
commit     31c0aa87ec8a30b1e9e4cf862905a369560f7705 (patch)
tree       0bdbff8a5adf82925fff819bd1d12a9696387a49
parent     9c94b39560c3a013de5886ea21ef1eaf21840cb9 (diff)
parent     ab9a7e27044b87ff2be47b8f8e095400e7fccc44 (diff)
download   linux-31c0aa87ec8a30b1e9e4cf862905a369560f7705.tar.xz
Merge tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random
Pull /dev/random updates from Ted Ts'o:

 - Improve getrandom and /dev/random's support for those arm64
   architecture variants that have RNG instructions.

 - Use batched output from CRNG instead of CPU's RNG instructions for
   better performance.

 - Miscellaneous bug fixes.

* tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
  random: avoid warnings for !CONFIG_NUMA builds
  random: fix data races at timer_rand_state
  random: always use batched entropy for get_random_u{32,64}
  random: Make RANDOM_TRUST_CPU depend on ARCH_RANDOM
  arm64: add credited/trusted RNG support
  random: add arch_get_random_*long_early()
  random: split primary/secondary crng init paths
-rw-r--r--  arch/arm64/include/asm/archrandom.h  14
-rw-r--r--  drivers/char/Kconfig                  2
-rw-r--r--  drivers/char/random.c                84
-rw-r--r--  include/linux/random.h               22
4 files changed, 87 insertions, 35 deletions
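
Before the diff itself, a minimal sketch (not part of this merge) of how in-kernel code consumes the interface the series touches. After this merge, get_random_u32()/get_random_u64() always draw from per-CPU batches of CRNG output rather than executing the CPU's RNG instructions on every call; the helper name example_pick_cookie() below is hypothetical.

#include <linux/random.h>

/* Hypothetical caller, for illustration only -- not part of this merge. */
static u64 example_pick_cookie(void)
{
	/*
	 * Optionally block (may sleep) until the CRNG is initialized,
	 * so the value returned below comes from a seeded generator.
	 */
	if (wait_for_random_bytes())
		return 0;	/* interrupted before the CRNG was ready */

	/* Served from the per-CPU batched CRNG output after this merge. */
	return get_random_u64();
}
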
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index 3fe02da70004..fc1594a0710e 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -4,6 +4,8 @@
#ifdef CONFIG_ARCH_RANDOM
+#include <linux/bug.h>
+#include <linux/kernel.h>
#include <linux/random.h>
#include <asm/cpufeature.h>
@@ -66,6 +68,18 @@ static inline bool __init __early_cpu_has_rndr(void)
return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
}
+static inline bool __init __must_check
+arch_get_random_seed_long_early(unsigned long *v)
+{
+ WARN_ON(system_state != SYSTEM_BOOTING);
+
+ if (!__early_cpu_has_rndr())
+ return false;
+
+ return __arm64_rndr(v);
+}
+#define arch_get_random_seed_long_early arch_get_random_seed_long_early
+
#else
static inline bool __arm64_rndr(unsigned long *v) { return false; }
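
Not part of the patch: a hedged sketch of how an early-boot caller is expected to use the new hook, matching the contract that the WARN_ON() above documents (boot CPU only, while system_state == SYSTEM_BOOTING). The helper name example_seed_from_cpu() is hypothetical.

#include <linux/random.h>

/* Hypothetical early-boot consumer, for illustration only. */
static void __init example_seed_from_cpu(unsigned long *seed)
{
	unsigned long rv;

	/* Valid only on the boot CPU, before secondary CPUs come up. */
	if (arch_get_random_seed_long_early(&rv))
		*seed ^= rv;		/* mix in the CPU-provided seed */
	else if (arch_get_random_long_early(&rv))
		*seed ^= rv;		/* RDRAND-class output as a fallback */
}
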
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fea084e0909b..d4665fe9ccd2 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -474,7 +474,7 @@ endmenu
config RANDOM_TRUST_CPU
bool "Trust the CPU manufacturer to initialize Linux's CRNG"
- depends on X86 || S390 || PPC
+ depends on ARCH_RANDOM
default n
help
Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
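
For reference, an illustrative .config fragment (an assumption about a typical arm64 build, not taken from this merge): with the dependency above, RANDOM_TRUST_CPU is selectable on any architecture that provides ARCH_RANDOM, including arm64 after this series, instead of the previous hard-coded X86/S390/PPC list.

# Hypothetical arm64 configuration fragment, for illustration only.
CONFIG_ARCH_RANDOM=y
# Selectable now that the option depends on ARCH_RANDOM rather than on a
# fixed architecture list:
CONFIG_RANDOM_TRUST_CPU=y
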
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c7f9584de2c8..0d10e31fd342 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -781,27 +781,55 @@ static int __init parse_trust_cpu(char *arg)
}
early_param("random.trust_cpu", parse_trust_cpu);
-static void crng_initialize(struct crng_state *crng)
+static bool crng_init_try_arch(struct crng_state *crng)
{
int i;
- int arch_init = 1;
+ bool arch_init = true;
unsigned long rv;
- memcpy(&crng->state[0], "expand 32-byte k", 16);
- if (crng == &primary_crng)
- _extract_entropy(&input_pool, &crng->state[4],
- sizeof(__u32) * 12, 0);
- else
- _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv)) {
rv = random_get_entropy();
- arch_init = 0;
+ arch_init = false;
+ }
+ crng->state[i] ^= rv;
+ }
+
+ return arch_init;
+}
+
+static bool __init crng_init_try_arch_early(struct crng_state *crng)
+{
+ int i;
+ bool arch_init = true;
+ unsigned long rv;
+
+ for (i = 4; i < 16; i++) {
+ if (!arch_get_random_seed_long_early(&rv) &&
+ !arch_get_random_long_early(&rv)) {
+ rv = random_get_entropy();
+ arch_init = false;
}
crng->state[i] ^= rv;
}
- if (trust_cpu && arch_init && crng == &primary_crng) {
+
+ return arch_init;
+}
+
+static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+{
+ memcpy(&crng->state[0], "expand 32-byte k", 16);
+ _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+ crng_init_try_arch(crng);
+ crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+}
+
+static void __init crng_initialize_primary(struct crng_state *crng)
+{
+ memcpy(&crng->state[0], "expand 32-byte k", 16);
+ _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
+ if (crng_init_try_arch_early(crng) && trust_cpu) {
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
@@ -822,7 +850,7 @@ static void do_numa_crng_init(struct work_struct *work)
crng = kmalloc_node(sizeof(struct crng_state),
GFP_KERNEL | __GFP_NOFAIL, i);
spin_lock_init(&crng->lock);
- crng_initialize(crng);
+ crng_initialize_secondary(crng);
pool[i] = crng;
}
mb();
@@ -1142,14 +1170,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* We take into account the first, second and third-order deltas
* in order to make our estimate.
*/
- delta = sample.jiffies - state->last_time;
- state->last_time = sample.jiffies;
+ delta = sample.jiffies - READ_ONCE(state->last_time);
+ WRITE_ONCE(state->last_time, sample.jiffies);
- delta2 = delta - state->last_delta;
- state->last_delta = delta;
+ delta2 = delta - READ_ONCE(state->last_delta);
+ WRITE_ONCE(state->last_delta, delta);
- delta3 = delta2 - state->last_delta2;
- state->last_delta2 = delta2;
+ delta3 = delta2 - READ_ONCE(state->last_delta2);
+ WRITE_ONCE(state->last_delta2, delta2);
if (delta < 0)
delta = -delta;
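
A minimal sketch (not from the patch) of the annotation pattern applied above: the timer_rand_state fields can be read and written from several contexts without a common lock, so the plain accesses are wrapped in READ_ONCE()/WRITE_ONCE() to force single, untorn loads and stores and to keep the compiler from refetching or caching them. The struct and function names below are hypothetical.

#include <linux/compiler.h>

struct example_state {			/* hypothetical */
	long last_value;
};

/* May run concurrently on the same state without a lock. */
static long example_delta(struct example_state *s, long sample)
{
	long delta = sample - READ_ONCE(s->last_value);

	WRITE_ONCE(s->last_value, sample);
	return delta;
}

As in the patched code, this tolerates the race rather than eliminating it; a concurrent update may still be lost, which is acceptable for a heuristic entropy estimate.
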
@@ -1771,7 +1799,7 @@ static void __init init_std_data(struct entropy_store *r)
int __init rand_initialize(void)
{
init_std_data(&input_pool);
- crng_initialize(&primary_crng);
+ crng_initialize_primary(&primary_crng);
crng_global_init_time = jiffies;
if (ratelimit_disable) {
urandom_warning.interval = 0;
@@ -2149,11 +2177,11 @@ struct batched_entropy {
/*
* Get a random word for internal kernel use only. The quality of the random
- * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy. In order to ensure
+ * number is good as /dev/urandom, but there is no backtrack protection, with
+ * the goal of being quite fast and not depleting entropy. In order to ensure
* that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once
- * at any point prior.
+ * wait_for_random_bytes() should be called and return 0 at least once at any
+ * point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
@@ -2166,15 +2194,6 @@ u64 get_random_u64(void)
struct batched_entropy *batch;
static void *previous;
-#if BITS_PER_LONG == 64
- if (arch_get_random_long((unsigned long *)&ret))
- return ret;
-#else
- if (arch_get_random_long((unsigned long *)&ret) &&
- arch_get_random_long((unsigned long *)&ret + 1))
- return ret;
-#endif
-
warn_unseeded_randomness(&previous);
batch = raw_cpu_ptr(&batched_entropy_u64);
@@ -2199,9 +2218,6 @@ u32 get_random_u32(void)
struct batched_entropy *batch;
static void *previous;
- if (arch_get_random_int(&ret))
- return ret;
-
warn_unseeded_randomness(&previous);
batch = raw_cpu_ptr(&batched_entropy_u32);
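
Illustrative only: with the fast paths above removed, get_random_u32()/get_random_u64() no longer execute an RDRAND-class instruction per call. A hypothetical caller that specifically wants the architectural source can still ask for it directly and fall back to the batched CRNG path, as sketched below.

#include <linux/random.h>

/* Hypothetical helper, for illustration only. */
static unsigned long example_arch_or_crng(void)
{
	unsigned long v;

	if (arch_get_random_long(&v))
		return v;		/* CPU RNG instruction, if available */

	return get_random_long();	/* per-CPU batched CRNG output */
}
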
diff --git a/include/linux/random.h b/include/linux/random.h
index d319f9a1e429..45e1f8fa742b 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -7,6 +7,8 @@
#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H
+#include <linux/bug.h>
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/once.h>
@@ -185,6 +187,26 @@ static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
}
#endif
+/*
+ * Called from the boot CPU during startup; not valid to call once
+ * secondary CPUs are up and preemption is possible.
+ */
+#ifndef arch_get_random_seed_long_early
+static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
+{
+ WARN_ON(system_state != SYSTEM_BOOTING);
+ return arch_get_random_seed_long(v);
+}
+#endif
+
+#ifndef arch_get_random_long_early
+static inline bool __init arch_get_random_long_early(unsigned long *v)
+{
+ WARN_ON(system_state != SYSTEM_BOOTING);
+ return arch_get_random_long(v);
+}
+#endif
+
/* Pseudo random number generator from numerical recipes. */
static inline u32 next_pseudo_random32(u32 seed)
{