author     Hannes Frederic Sowa <hannes@stressinduktion.org>   2013-11-11 12:20:34 +0100
committer  David S. Miller <davem@davemloft.net>                2013-11-11 14:32:14 -0500
commit     4af712e8df998475736f3e2727701bd31e3751a9
tree       315b7092cf6ee0aeb3a180a09f8d628984cd9ddf /lib
parent     6d31920246a9fc80be4f16acd27c0bbe8d7b8494
random32: add prandom_reseed_late() and call when nonblocking pool becomes initialized
The Tausworthe PRNG is initialized at late_initcall time. At that point the
entropy pool backing get_random_bytes() is not yet sufficiently filled. This
patch adds an additional reseeding step as soon as the nonblocking pool
gets marked as initialized.
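(The hook that performs this call lives in the entropy-pool code outside lib/
and is therefore not part of the diff below. A minimal sketch of the idea,
with a hypothetical function name standing in for the real call site:)

	#include <linux/random.h>	/* prandom_reseed_late() is declared here after this change */

	/* Hypothetical hook, for illustration only: once the nonblocking
	 * pool is flagged as initialized, get_random_bytes() returns
	 * well-seeded data, so reseed the Tausworthe per-cpu states. */
	static void nonblocking_pool_now_initialized(void)
	{
		prandom_reseed_late();
	}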
On some machines the nonblocking pool may already be initialized by the time
late_initcall runs. In this situation we won't reseed a second time.
(A call to prandom_reseed_late() blocks later invocations of the early
reseed attempt.)
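To make the ordering concrete, here is a small user-space model of the latch
logic (plain C, not kernel code; names are illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	static bool latch = false;

	/* Mirrors the latch check in __prandom_reseed(): a late reseed
	 * always runs, while the early (late_initcall) reseed is skipped
	 * once any reseed has already happened. */
	static void reseed(bool late)
	{
		if (latch && !late) {
			printf("early reseed skipped\n");
			return;
		}
		latch = true;
		printf("%s reseed performed\n", late ? "late" : "early");
	}

	int main(void)
	{
		/* Machine where the pool initializes before late_initcall: */
		reseed(true);	/* late reseed performed */
		reseed(false);	/* early reseed skipped  */
		return 0;
	}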
Joint work with Daniel Borkmann.
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib')
-rw-r--r-- | lib/random32.c | 23
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/lib/random32.c b/lib/random32.c
index 12215df701e8..9f2f2fb03dfe 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -200,9 +200,18 @@ static void prandom_start_seed_timer(void)
  * Generate better values after random number generator
  * is fully initialized.
  */
-static int __init prandom_reseed(void)
+static void __prandom_reseed(bool late)
 {
 	int i;
+	unsigned long flags;
+	static bool latch = false;
+	static DEFINE_SPINLOCK(lock);
+
+	/* only allow initial seeding (late == false) once */
+	spin_lock_irqsave(&lock, flags);
+	if (latch && !late)
+		goto out;
+	latch = true;
 
 	for_each_possible_cpu(i) {
 		struct rnd_state *state = &per_cpu(net_rand_state,i);
@@ -216,6 +225,18 @@ static int __init prandom_reseed(void)
 		/* mix it in */
 		prandom_u32_state(state);
 	}
+out:
+	spin_unlock_irqrestore(&lock, flags);
+}
+
+void prandom_reseed_late(void)
+{
+	__prandom_reseed(true);
+}
+
+static int __init prandom_reseed(void)
+{
+	__prandom_reseed(false);
 	prandom_start_seed_timer();
 	return 0;
 }
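Consumers see the improved quality only indirectly: any code that calls
prandom_u32() draws from the per-cpu net_rand_state that __prandom_reseed()
refreshes. An illustrative sketch (the helper name is hypothetical, not part
of this patch):

	#include <linux/random.h>

	/* Illustrative helper only: picks a pseudo-random backoff in
	 * [0, 99] ms; the values get better once prandom_reseed_late()
	 * has mixed in entropy from get_random_bytes(). */
	static u32 pick_random_backoff_ms(void)
	{
		return prandom_u32() % 100;
	}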