[v5,2/3] random: introduce generic vDSO getrandom() implementation
Commit Message
Provide a generic C vDSO getrandom() implementation, which operates on
an opaque state returned by vgetrandom_alloc() and produces random bytes
the same way as getrandom(). This has the API signature:
ssize_t vgetrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state);
The return value and the first 3 arguments are the same as ordinary
getrandom(), while the last argument is a pointer to the opaque
allocated state. Were all four arguments passed to the getrandom()
syscall, nothing different would happen, and the functions would have
the exact same behavior.
The actual vDSO RNG algorithm implemented is the same one implemented by
drivers/char/random.c, using the same fast-erasure techniques as that.
Should the in-kernel implementation change, so too will the vDSO one.
Initially, the state is keyless, and so the first call makes a
getrandom() syscall to generate that key, and then uses it for
subsequent calls. By keeping track of a generation counter, it knows
when its key is invalidated and it should fetch a new one using the
syscall. Later, more than just a generation counter might be used.
Since MADV_WIPEONFORK is set on the opaque state, the key and related
state is wiped during a fork(), so secrets don't roll over into new
processes, and the same state doesn't accidentally generate the same
random stream. The generation counter, as well, is always >0, so that
the 0 counter is a useful indication of a fork() or otherwise
uninitialized state.
If the kernel RNG is not yet initialized, then the vDSO always calls the
syscall, because that behavior cannot be emulated in userspace, but
fortunately that state is short lived and only during early boot. If it
has been initialized, then there is no need to inspect the `flags`
argument, because the behavior does not change post-initialization
regardless of the `flags` value.
Together with the previous commit that introduces vgetrandom_alloc(),
this functionality is intended to be integrated into libc's thread
management. As an illustrative example, the following code might be used
to do the same outside of libc. All of the static functions are to be
considered implementation private, including the vgetrandom_alloc()
syscall wrapper, which generally shouldn't be exposed outside of libc,
with the non-static vgetrandom() function at the end being the exported
interface. The various pthread-isms are expected to be elided into libc
internals. This per-thread allocation scheme is very naive and does not
shrink; other implementations may choose to be more complex.
/*
 * Thin userspace wrapper around the vgetrandom_alloc() syscall.
 * On success returns the base of the newly mapped block of opaque states;
 * on failure returns NULL (error values live in the top 4095 addresses).
 */
static void *vgetrandom_alloc(size_t *num, size_t *size_per_each, unsigned int flags)
{
	long ret = syscall(__NR_vgetrandom_alloc, num, size_per_each, flags);

	return (unsigned long)ret > -4096UL ? NULL : (void *)ret;
}
/*
 * Process-wide free-list allocator of per-thread opaque states.
 * ->states holds ->len currently-free state pointers out of ->cap total
 * capacity; this allocation scheme never shrinks. All fields are
 * protected by ->lock.
 */
static struct {
pthread_mutex_t lock;
void **states;
size_t len, cap;
} grnd_allocator = {
.lock = PTHREAD_MUTEX_INITIALIZER
};
/*
 * Pops one free opaque state off the allocator's free list, first asking the
 * kernel for a fresh batch of states if the list is empty. Returns NULL on
 * failure, in which case the caller should fall back to the getrandom()
 * syscall.
 */
static void *vgetrandom_get_state(void)
{
	void *state = NULL;

	pthread_mutex_lock(&grnd_allocator.lock);
	if (!grnd_allocator.len) {
		size_t new_cap, size_per_each, num = 16; /* Just a hint. */
		/*
		 * Walk the new block with a char * cursor: arithmetic on
		 * void * is a GCC extension, not ISO C.
		 */
		char *new_block = vgetrandom_alloc(&num, &size_per_each, 0);
		void *new_states;

		if (!new_block)
			goto out;
		new_cap = grnd_allocator.cap + num;
		/* reallocarray() checks new_cap * sizeof(...) for overflow. */
		new_states = reallocarray(grnd_allocator.states, new_cap, sizeof(*grnd_allocator.states));
		if (!new_states) {
			munmap(new_block, num * size_per_each);
			goto out;
		}
		grnd_allocator.cap = new_cap;
		grnd_allocator.states = new_states;

		/* Carve the mapped block into num equally sized states. */
		for (size_t i = 0; i < num; ++i) {
			grnd_allocator.states[i] = new_block;
			new_block += size_per_each;
		}
		grnd_allocator.len = num;
	}
	state = grnd_allocator.states[--grnd_allocator.len];

out:
	pthread_mutex_unlock(&grnd_allocator.lock);
	return state;
}
/*
 * Returns an opaque state to the allocator's free list; a NULL state is a
 * no-op. Also used as the pthread key destructor on thread exit.
 */
static void vgetrandom_put_state(void *state)
{
	if (state == NULL)
		return;
	pthread_mutex_lock(&grnd_allocator.lock);
	grnd_allocator.states[grnd_allocator.len] = state;
	++grnd_allocator.len;
	pthread_mutex_unlock(&grnd_allocator.lock);
}
/*
 * Per-process vDSO getrandom() context: the resolved __vdso_getrandom
 * function pointer (NULL if unavailable), and a pthread key whose destructor
 * returns each thread's state to the allocator on thread exit.
 *
 * The flags parameter is unsigned int, matching both getrandom(2) and the
 * __vdso_getrandom() implementation; calling through a function pointer
 * whose parameter types mismatch the callee is undefined behavior.
 */
static struct {
	ssize_t (*fn)(void *buf, size_t len, unsigned int flags, void *state);
	pthread_key_t key;
	pthread_once_t initialized;
} grnd_ctx = {
	.initialized = PTHREAD_ONCE_INIT
};
/*
 * One-time setup: create the per-thread key (with vgetrandom_put_state as
 * destructor) and resolve the vDSO symbol. If key creation fails, grnd_ctx.fn
 * stays NULL, so vgetrandom() permanently falls back to the syscall.
 */
static void vgetrandom_init(void)
{
	if (pthread_key_create(&grnd_ctx.key, vgetrandom_put_state))
		return;
	grnd_ctx.fn = __vdsosym("LINUX_2.6", "__vdso_getrandom");
}
/*
 * Exported getrandom() interface: same semantics as getrandom(2), but served
 * from the vDSO when possible. Lazily allocates a per-thread opaque state on
 * first use; falls back to the syscall whenever the vDSO function or a state
 * is unavailable.
 */
ssize_t vgetrandom(void *buf, size_t len, unsigned long flags)
{
	void *state;

	pthread_once(&grnd_ctx.initialized, vgetrandom_init);
	if (!grnd_ctx.fn)
		return getrandom(buf, len, flags);

	state = pthread_getspecific(grnd_ctx.key);
	if (!state) {
		state = vgetrandom_get_state();
		if (pthread_setspecific(grnd_ctx.key, state)) {
			/* Couldn't stash it; give it back and use the syscall. */
			vgetrandom_put_state(state);
			state = NULL;
		}
		if (!state)
			return getrandom(buf, len, flags);
	}
	return grnd_ctx.fn(buf, len, flags, state);
}
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
MAINTAINERS | 1 +
drivers/char/random.c | 5 ++
include/vdso/datapage.h | 6 +++
lib/crypto/chacha.c | 4 ++
lib/vdso/Kconfig | 5 ++
lib/vdso/getrandom.c | 115 ++++++++++++++++++++++++++++++++++++++++
6 files changed, 136 insertions(+)
create mode 100644 lib/vdso/getrandom.c
Comments
On Sat, Nov 19, 2022 at 01:09:28PM +0100, Jason A. Donenfeld wrote:
> diff --git a/drivers/char/random.c b/drivers/char/random.c
> index ab6e02efa432..7dfdbf424c92 100644
> --- a/drivers/char/random.c
> +++ b/drivers/char/random.c
> @@ -61,6 +61,7 @@
> #include <asm/irq.h>
> #include <asm/irq_regs.h>
> #include <asm/io.h>
> +#include <vdso/datapage.h>
> #include "../../lib/vdso/getrandom.h"
>
> /*********************************************************************
> @@ -307,6 +308,8 @@ static void crng_reseed(struct work_struct *work)
> if (next_gen == ULONG_MAX)
> ++next_gen;
> WRITE_ONCE(base_crng.generation, next_gen);
> + if (IS_ENABLED(CONFIG_HAVE_VDSO_GETRANDOM))
> + smp_store_release(&_vdso_rng_data.generation, next_gen + 1);
Is the purpose of the smp_store_release() here to order the writes of
base_crng.generation and _vdso_rng_data.generation? It could use a comment.
> if (!static_branch_likely(&crng_is_ready))
> crng_init = CRNG_READY;
> spin_unlock_irqrestore(&base_crng.lock, flags);
> @@ -756,6 +759,8 @@ static void __cold _credit_init_bits(size_t bits)
> crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
> if (static_key_initialized)
> execute_in_process_context(crng_set_ready, &set_ready);
> + if (IS_ENABLED(CONFIG_HAVE_VDSO_GETRANDOM))
> + smp_store_release(&_vdso_rng_data.is_ready, true);
Similarly, is the purpose of this smp_store_release() to order the writes to
the generation counters and is_ready? It could use a comment.
> diff --git a/lib/vdso/getrandom.c b/lib/vdso/getrandom.c
> new file mode 100644
> index 000000000000..8bef1e92a79d
> --- /dev/null
> +++ b/lib/vdso/getrandom.c
> @@ -0,0 +1,115 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (C) 2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
> + */
> +
> +#include <linux/kernel.h>
> +#include <linux/atomic.h>
> +#include <linux/fs.h>
> +#include <vdso/datapage.h>
> +#include <asm/vdso/getrandom.h>
> +#include <asm/vdso/vsyscall.h>
> +#include "getrandom.h"
> +
> +#undef memcpy
> +#define memcpy(d,s,l) __builtin_memcpy(d,s,l)
> +#undef memset
> +#define memset(d,c,l) __builtin_memset(d,c,l)
> +
> +#define CHACHA_FOR_VDSO_INCLUDE
> +#include "../crypto/chacha.c"
> +
> +static void memcpy_and_zero(void *dst, void *src, size_t len)
> +{
> +#define CASCADE(type) \
> + while (len >= sizeof(type)) { \
> + *(type *)dst = *(type *)src; \
> + *(type *)src = 0; \
> + dst += sizeof(type); \
> + src += sizeof(type); \
> + len -= sizeof(type); \
> + }
> +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
> +#if BITS_PER_LONG == 64
> + CASCADE(u64);
> +#endif
> + CASCADE(u32);
> + CASCADE(u16);
> +#endif
> + CASCADE(u8);
> +#undef CASCADE
> +}
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS doesn't mean that dereferencing
misaligned pointers is okay. You still need to use get_unaligned() and
put_unaligned(). Take a look at crypto_xor(), for example.
> +static __always_inline ssize_t
> +__cvdso_getrandom_data(const struct vdso_rng_data *rng_info, void *buffer, size_t len,
> + unsigned int flags, void *opaque_state)
> +{
> + ssize_t ret = min_t(size_t, MAX_RW_COUNT, len);
> + struct vgetrandom_state *state = opaque_state;
> + u32 chacha_state[CHACHA_STATE_WORDS];
> + unsigned long current_generation;
> + size_t batch_len;
> +
> + if (unlikely(!rng_info->is_ready))
> + return getrandom_syscall(buffer, len, flags);
> +
> + if (unlikely(!len))
> + return 0;
> +
> +retry_generation:
> + current_generation = READ_ONCE(rng_info->generation);
> + if (unlikely(state->generation != current_generation)) {
> + if (getrandom_syscall(state->key, sizeof(state->key), 0) != sizeof(state->key))
> + return getrandom_syscall(buffer, len, flags);
> + WRITE_ONCE(state->generation, current_generation);
> + state->pos = sizeof(state->batch);
> + }
> +
> + len = ret;
> +more_batch:
> + batch_len = min_t(size_t, sizeof(state->batch) - state->pos, len);
> + if (batch_len) {
> + memcpy_and_zero(buffer, state->batch + state->pos, batch_len);
> + state->pos += batch_len;
> + buffer += batch_len;
> + len -= batch_len;
> + }
> + if (!len) {
> + /*
> + * Since rng_info->generation will never be 0, we re-read state->generation,
> + * rather than using the local current_generation variable, to learn whether
> + * we forked.
> + */
> + if (unlikely(READ_ONCE(state->generation) != READ_ONCE(rng_info->generation))) {
> + buffer -= ret;
> + goto retry_generation;
> + }
> + return ret;
> + }
> +
> + chacha_init_consts(chacha_state);
> + memcpy(&chacha_state[4], state->key, CHACHA_KEY_SIZE);
> + memset(&chacha_state[12], 0, sizeof(u32) * 4);
> +
> + while (len >= CHACHA_BLOCK_SIZE) {
> + chacha20_block(chacha_state, buffer);
> + if (unlikely(chacha_state[12] == 0))
> + ++chacha_state[13];
> + buffer += CHACHA_BLOCK_SIZE;
> + len -= CHACHA_BLOCK_SIZE;
> + }
> +
> + chacha20_block(chacha_state, state->key_batch);
> + if (unlikely(chacha_state[12] == 0))
> + ++chacha_state[13];
> + chacha20_block(chacha_state, state->key_batch + CHACHA_BLOCK_SIZE);
> + state->pos = 0;
> + memzero_explicit(chacha_state, sizeof(chacha_state));
> + goto more_batch;
> +}
There's a lot of subtle stuff going on here. Adding some more comments would be
helpful. Maybe bring in some of the explanation that's currently only in the
commit message.
One question I have is about forking. So, when a thread calls fork(), in the
child the kernel automatically replaces all vgetrandom_state pages with zeroed
pages (due to MADV_WIPEONFORK). If the child calls __cvdso_getrandom_data()
afterwards, it sees the zeroed state. But that's indistinguishable from the
state at the very beginning, after sys_vgetrandom_alloc() was just called,
right? So as long as this code handles initializing the state at the beginning,
then I'd think it would naturally handle fork() as well.
However, it seems you have something a bit more subtle in mind, where the thread
calls fork() *while* it's in the middle of __cvdso_getrandom_data(). I guess
you are thinking of the case where a signal is sent to the thread while it's
executing __cvdso_getrandom_data(), and then the signal handler calls fork()?
Note that it doesn't matter if a different thread in the *process* calls fork().
If it's possible for the thread to fork() (and hence for the vgetrandom_state to
be zeroed) at absolutely any time, it probably would be a good idea to mark that
whole struct as volatile.
- Eric
Hi Eric,
On Sat, Nov 19, 2022 at 03:10:12PM -0800, Eric Biggers wrote:
> > + if (IS_ENABLED(CONFIG_HAVE_VDSO_GETRANDOM))
> > + smp_store_release(&_vdso_rng_data.generation, next_gen + 1);
>
> Is the purpose of the smp_store_release() here to order the writes of
> base_crng.generation and _vdso_rng_data.generation? It could use a comment.
>
> > + if (IS_ENABLED(CONFIG_HAVE_VDSO_GETRANDOM))
> > + smp_store_release(&_vdso_rng_data.is_ready, true);
>
> Similarly, is the purpose of this smp_store_release() to order the writes to the
> the generation counters and is_ready? It could use a comment.
Yes, I guess so. Actually this comes from an unexplored IRC comment from
Andy back in July:
2022-07-29 21:21:56 <amluto> zx2c4: WRITE_ONCE(_vdso_rng_data.generation, next_gen + 1);
2022-07-29 21:22:23 <amluto> For x86 it shouldn’t matter much. For portability, smp_store_release
Though maybe that doesn't actually matter much? When the userspace CPU
learns about a change to vdso_rng_data, its only course of action is to
make a syscall to getrandom(), anyway, and those paths should be
consistent with themselves, thanks to the same locking and
synchronization there's always been there. So maybe I actually should
move back to WRITE_ONCE() here? Hm?
> > +static void memcpy_and_zero(void *dst, void *src, size_t len)
> > +{
> > +#define CASCADE(type) \
> > + while (len >= sizeof(type)) { \
> > + *(type *)dst = *(type *)src; \
> > + *(type *)src = 0; \
> > + dst += sizeof(type); \
> > + src += sizeof(type); \
> > + len -= sizeof(type); \
> > + }
> > +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
> > +#if BITS_PER_LONG == 64
> > + CASCADE(u64);
> > +#endif
> > + CASCADE(u32);
> > + CASCADE(u16);
> > +#endif
> > + CASCADE(u8);
> > +#undef CASCADE
> > +}
>
> CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS doesn't mean that dereferencing
> misaligned pointers is okay. You still need to use get_unaligned() and
> put_unaligned(). Take a look at crypto_xor(), for example.
Right, thanks. Will do.
> There's a lot of subtle stuff going on here. Adding some more comments would be
> helpful. Maybe bring in some of the explanation that's currently only in the
> commit message.
Good idea.
> One question I have is about forking. So, when a thread calls fork(), in the
> child the kernel automatically replaces all vgetrandom_state pages with zeroed
> pages (due to MADV_WIPEONFORK). If the child calls __cvdso_getrandom_data()
> afterwards, it sees the zeroed state. But that's indistinguishable from the
> state at the very beginning, after sys_vgetrandom_alloc() was just called,
> right? So as long as this code handles initializing the state at the beginning,
> then I'd think it would naturally handle fork() as well.
Right, for this simple fork() case, it works fine. There are other cases
though that are trickier...
> However, it seems you have something a bit more subtle in mind, where the thread
> calls fork() *while* it's in the middle of __cvdso_getrandom_data(). I guess
> you are thinking of the case where a signal is sent to the thread while it's
> executing __cvdso_getrandom_data(), and then the signal handler calls fork()?
> Note that it doesn't matter if a different thread in the *process* calls fork().
>
> If it's possible for the thread to fork() (and hence for the vgetrandom_state to
> be zeroed) at absolutely any time, it probably would be a good idea to mark that
> whole struct as volatile.
Actually, this isn't something that matters, I don't think. If
state->key_batch is zeroed, the result will be wrong, but the function
logic will be fine. If state->pos is zeroed, it'll write to the
beginning of the batch, which might be wrong, but the function logic
will still be fine. That is, in both of these cases, even if the
calculation is wrong, there's no memory corruption or anything. So then,
the remaining member is state->generation. If this is zeroed, then it's
actually something we detect with that READ_ONCE()! And in this case,
it's a sign that something is off -- we forked -- and so we should start
over from the beginning. So I don't think there's a reason to mark the
whole struct as volatile. The one we care about is state->generation,
and for that we READ_ONCE() it at the place that matters.
There's actually a different scenario, though, that I'm concerned about,
and this is the case in which a multithreaded program forks in the
middle of one of its threads running this. Indeed, only the calling
thread will carry forward into the child process, but all the memory is
still left around from any concurrent threads in the middle of
vgetrandom(). And if they're in the middle of a vgetrandom() call, that
means they haven't yet done erasure and cleaned up the stack to prevent
their state from leaking, and so forward secrecy is potentially lost,
since the child process now has some state from the parent.
I'm not quite sure what the best approach here is. One idea would be to
just note that libcs should wait until vgetrandom() has returned
everywhere before forking, using its atfork functionality. Another
approach would be to say that multithreaded programs using this
shouldn't fork or something, but that seems disappointing. Or more state
could be allocated in the zeroing region, to hold a chacha state, so
another 64 bytes, which would be sort of unfortunate. Or something else?
I'd be interested to hear your impression of this quandary.
Jason
On Sun, Nov 20, 2022 at 01:53:53AM +0100, Jason A. Donenfeld wrote:
> I'm not quite sure what the best approach here is. One idea would be to
> just note that libcs should wait until vgetrandom() has returned
> everywhere before forking, using its atfork functionality.
To elaborate on this idea a bit, the way this looks is:
rwlock_t l;
pid_t fork(void)
{
pid_t pid;
write_lock(&l);
pid = syscall_fork();
write_unlock(&l);
return pid;
}
ssize_t getrandom(...)
{
ssize_t ret;
...
if (!read_try_lock(&l))
return syscall_getrandom(...);
ret = vdso_getrandom(...);
read_unlock(&l);
return ret;
}
So maybe that doesn't seem that bad, especially considering libc already
has the kind of infrastructure in place to do that somewhat easily.
Maybe there's a priority locking thing to get right here -- the writer
should immediately starve out all future readers, so it's not unbound --
but that seems par for the course.
Jason
On Sun, Nov 20, 2022 at 01:53:53AM +0100, Jason A. Donenfeld wrote:
> shouldn't fork or something, but that seems disappointing. Or more state
> could be allocated in the zeroing region, to hold a chacha state, so
> another 64 bytes, which would be sort of unfortunate. Or something else?
> I'd be interested to hear your impression of this quandary.
Another 128 bytes, actually. And the current chacha in there isn't
cleaning up its stack as one might hope. So maybe the cleanest solution
would be to just bite the bullet and allocate another 128 bytes per
state and make a mini chacha that operates over that? (And I guess hope
it doesn't need to spill and such...)
Jason
On Sun, Nov 20, 2022 at 01:53:53AM +0100, Jason A. Donenfeld wrote:
> Hi Eric,
>
> On Sat, Nov 19, 2022 at 03:10:12PM -0800, Eric Biggers wrote:
> > > + if (IS_ENABLED(CONFIG_HAVE_VDSO_GETRANDOM))
> > > + smp_store_release(&_vdso_rng_data.generation, next_gen + 1);
> >
> > Is the purpose of the smp_store_release() here to order the writes of
> > base_crng.generation and _vdso_rng_data.generation? It could use a comment.
> >
> > > + if (IS_ENABLED(CONFIG_HAVE_VDSO_GETRANDOM))
> > > + smp_store_release(&_vdso_rng_data.is_ready, true);
> >
> > Similarly, is the purpose of this smp_store_release() to order the writes to the
> > the generation counters and is_ready? It could use a comment.
>
> Yes, I guess so. Actually this comes from an unexplored IRC comment from
> Andy back in July:
>
> 2022-07-29 21:21:56 <amluto> zx2c4: WRITE_ONCE(_vdso_rng_data.generation, next_gen + 1);
> 2022-07-29 21:22:23 <amluto> For x86 it shouldn’t matter much. For portability, smp_store_release
>
> Though maybe that doesn't actually matter much? When the userspace CPU
> learns about a change to vdso_rng_data, it's only course of action is
> make a syscall to getrandom(), anyway, and those paths should be
> consistent with themselves, thanks to the same locking and
> synchronization there's always been there. So maybe I actually should
> move back to WRITE_ONCE() here? Hm?
Well, sys_getrandom() will just do:
if (unlikely(crng->generation != READ_ONCE(base_crng.generation)))
So I think you do need ordering between base_crng.generation and
_vdso_rng_data.generation. If _vdso_rng_data.generation is changed, the change
in base_crng.generation needs to be visible too.
> > One question I have is about forking. So, when a thread calls fork(), in the
> > child the kernel automatically replaces all vgetrandom_state pages with zeroed
> > pages (due to MADV_WIPEONFORK). If the child calls __cvdso_getrandom_data()
> > afterwards, it sees the zeroed state. But that's indistinguishable from the
> > state at the very beginning, after sys_vgetrandom_alloc() was just called,
> > right? So as long as this code handles initializing the state at the beginning,
> > then I'd think it would naturally handle fork() as well.
>
> Right, for this simple fork() case, it works fine. There are other cases
> though that are trickier...
>
> > However, it seems you have something a bit more subtle in mind, where the thread
> > calls fork() *while* it's in the middle of __cvdso_getrandom_data(). I guess
> > you are thinking of the case where a signal is sent to the thread while it's
> > executing __cvdso_getrandom_data(), and then the signal handler calls fork()?
> > Note that it doesn't matter if a different thread in the *process* calls fork().
> >
> > If it's possible for the thread to fork() (and hence for the vgetrandom_state to
> > be zeroed) at absolutely any time, it probably would be a good idea to mark that
> > whole struct as volatile.
>
> Actually, this isn't something that matters, I don't think. If
> state->key_batch is zeroed, the result will be wrong, but the function
> logic will be fine. If state->pos is zeroed, it'll write to the
> beginning of the batch, which might be wrong, but the function logic
> will still be fine. That is, in both of these cases, even if the
> calculation is wrong, there's no memory corruption or anything. So then,
> the remaining member is state->generation. If this is zeroed, then it's
> actually something we detect with that READ_ONCE()! And in this case,
> it's a sign that something is off -- we forked -- and so we should start
> over from the beginning. So I don't think there's a reason to mark the
> whole struct as volatile. The one we care about is state->generation,
> and for that we READ_ONCE() it at the place that matters.
It's undefined behavior for C code to be working on values that can be mutated
underneath it, though, unless they are volatile. Granted, people still do this
all the time, but I'd hope we can be a bit more careful here...
> There's actually a different scenario, though, that I'm concerned about,
> and this is the case in which a multithreaded program forks in the
> middle of one of its threads running this. Indeed, only the calling
> thread will carry forward into the child process, but all the memory is
> still left around from any concurrent threads in the middle of
> vgetrandom(). And if they're in the middle of a vgetrandom() call, that
> means they haven't yet done erasure and cleaned up the stack to prevent
> their state from leaking, and so forward secrecy is potentially lost,
> since the child process now has some state from the parent.
That is a separate problem though, right? It does *not* mean that the
vgetrandom_state can be zeroed out from underneath __cvdso_getrandom_data().
- Eric
On Sun, Nov 20, 2022 at 02:43:31AM +0100, Jason A. Donenfeld wrote:
> On Sun, Nov 20, 2022 at 01:53:53AM +0100, Jason A. Donenfeld wrote:
> > shouldn't fork or something, but that seems disappointing. Or more state
> > could be allocated in the zeroing region, to hold a chacha state, so
> > another 64 bytes, which would be sort of unfortunate. Or something else?
> > I'd be interested to hear your impression of this quandary.
>
> Another 128 bytes, actually. And the current chacha in there isn't
> cleaning up its stack as one might hope. So maybe the cleanest solution
> would be to just bite the bullet and allocate another 128 bytes per
> state and make a mini chacha that operates over that? (And I guess hope
> it doesn't need to spill and such...)
I've got it implemented without using any stack now. Wasn't so bad. So
all of the additional concerns I added will be addressed in v+1.
Jason
On Sun, Nov 20, 2022 at 02:04:49AM +0100, Jason A. Donenfeld wrote:
> On Sun, Nov 20, 2022 at 01:53:53AM +0100, Jason A. Donenfeld wrote:
> > I'm not quite sure what the best approach here is. One idea would be to
> > just note that libcs should wait until vgetrandom() has returned
> > everywhere before forking, using its atfork functionality.
>
> To elaborate on this idea a bit, the way this looks is:
>
> rwlock_t l;
> pid_t fork(void)
> {
> pid_t pid;
> write_lock(&l);
> pid = syscall_fork();
> write_unlock(&l);
> return pid;
> }
> ssize_t getrandom(...)
> {
> ssize_t ret;
> ...
> if (!read_try_lock(&l))
> return syscall_getrandom(...);
> ret = vdso_getrandom(...);
> read_unlock(&l);
> return ret;
> }
>
> So maybe that doesn't seem that bad, especially considering libc already
> has the kind of infrastructure in place to do that somewhat easily.
> Maybe there's a priority locking thing to get right here -- the writer
> should immediately starve out all future readers, so it's not unbound --
> but that seems par for the course.
Fortunately none of this was necessary, and I've got things implemented
without needing to resort to that, for v+1.
Jason
@@ -17287,6 +17287,7 @@ T: git https://git.kernel.org/pub/scm/linux/kernel/git/crng/random.git
S: Maintained
F: drivers/char/random.c
F: drivers/virt/vmgenid.c
+F: lib/vdso/getrandom.c
F: lib/vdso/getrandom.h
RAPIDIO SUBSYSTEM
@@ -61,6 +61,7 @@
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>
+#include <vdso/datapage.h>
#include "../../lib/vdso/getrandom.h"
/*********************************************************************
@@ -307,6 +308,8 @@ static void crng_reseed(struct work_struct *work)
if (next_gen == ULONG_MAX)
++next_gen;
WRITE_ONCE(base_crng.generation, next_gen);
+ if (IS_ENABLED(CONFIG_HAVE_VDSO_GETRANDOM))
+ smp_store_release(&_vdso_rng_data.generation, next_gen + 1);
if (!static_branch_likely(&crng_is_ready))
crng_init = CRNG_READY;
spin_unlock_irqrestore(&base_crng.lock, flags);
@@ -756,6 +759,8 @@ static void __cold _credit_init_bits(size_t bits)
crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
if (static_key_initialized)
execute_in_process_context(crng_set_ready, &set_ready);
+ if (IS_ENABLED(CONFIG_HAVE_VDSO_GETRANDOM))
+ smp_store_release(&_vdso_rng_data.is_ready, true);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
@@ -109,6 +109,11 @@ struct vdso_data {
struct arch_vdso_data arch_data;
};
+struct vdso_rng_data {
+ unsigned long generation;
+ bool is_ready;
+};
+
/*
* We use the hidden visibility to prevent the compiler from generating a GOT
* relocation. Not only is going through a GOT useless (the entry couldn't and
@@ -120,6 +125,7 @@ struct vdso_data {
*/
extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden")));
extern struct vdso_data _timens_data[CS_BASES] __attribute__((visibility("hidden")));
+extern struct vdso_rng_data _vdso_rng_data __attribute__((visibility("hidden")));
/*
* The generic vDSO implementation requires that gettimeofday.h
@@ -17,8 +17,10 @@ static void chacha_permute(u32 *x, int nrounds)
{
int i;
+#ifndef CHACHA_FOR_VDSO_INCLUDE
/* whitelist the allowed round counts */
WARN_ON_ONCE(nrounds != 20 && nrounds != 12);
+#endif
for (i = 0; i < nrounds; i += 2) {
x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16);
@@ -87,6 +89,7 @@ void chacha_block_generic(u32 *state, u8 *stream, int nrounds)
state[12]++;
}
+#ifndef CHACHA_FOR_VDSO_INCLUDE
EXPORT_SYMBOL(chacha_block_generic);
/**
@@ -112,3 +115,4 @@ void hchacha_block_generic(const u32 *state, u32 *stream, int nrounds)
memcpy(&stream[4], &x[12], 16);
}
EXPORT_SYMBOL(hchacha_block_generic);
+#endif
@@ -30,4 +30,9 @@ config GENERIC_VDSO_TIME_NS
Selected by architectures which support time namespaces in the
VDSO
+config HAVE_VDSO_GETRANDOM
+ bool
+ help
+ Selected by architectures that support vDSO getrandom().
+
endif
new file mode 100644
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/fs.h>
+#include <vdso/datapage.h>
+#include <asm/vdso/getrandom.h>
+#include <asm/vdso/vsyscall.h>
+#include "getrandom.h"
+
+#undef memcpy
+#define memcpy(d,s,l) __builtin_memcpy(d,s,l)
+#undef memset
+#define memset(d,c,l) __builtin_memset(d,c,l)
+
+#define CHACHA_FOR_VDSO_INCLUDE
+#include "../crypto/chacha.c"
+
+static void memcpy_and_zero(void *dst, void *src, size_t len)
+{
+#define CASCADE(type) \
+ while (len >= sizeof(type)) { \
+ *(type *)dst = *(type *)src; \
+ *(type *)src = 0; \
+ dst += sizeof(type); \
+ src += sizeof(type); \
+ len -= sizeof(type); \
+ }
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#if BITS_PER_LONG == 64
+ CASCADE(u64);
+#endif
+ CASCADE(u32);
+ CASCADE(u16);
+#endif
+ CASCADE(u8);
+#undef CASCADE
+}
+
+static __always_inline ssize_t
+__cvdso_getrandom_data(const struct vdso_rng_data *rng_info, void *buffer, size_t len,
+ unsigned int flags, void *opaque_state)
+{
+ ssize_t ret = min_t(size_t, MAX_RW_COUNT, len);
+ struct vgetrandom_state *state = opaque_state;
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ unsigned long current_generation;
+ size_t batch_len;
+
+ if (unlikely(!rng_info->is_ready))
+ return getrandom_syscall(buffer, len, flags);
+
+ if (unlikely(!len))
+ return 0;
+
+retry_generation:
+ current_generation = READ_ONCE(rng_info->generation);
+ if (unlikely(state->generation != current_generation)) {
+ if (getrandom_syscall(state->key, sizeof(state->key), 0) != sizeof(state->key))
+ return getrandom_syscall(buffer, len, flags);
+ WRITE_ONCE(state->generation, current_generation);
+ state->pos = sizeof(state->batch);
+ }
+
+ len = ret;
+more_batch:
+ batch_len = min_t(size_t, sizeof(state->batch) - state->pos, len);
+ if (batch_len) {
+ memcpy_and_zero(buffer, state->batch + state->pos, batch_len);
+ state->pos += batch_len;
+ buffer += batch_len;
+ len -= batch_len;
+ }
+ if (!len) {
+ /*
+ * Since rng_info->generation will never be 0, we re-read state->generation,
+ * rather than using the local current_generation variable, to learn whether
+ * we forked.
+ */
+ if (unlikely(READ_ONCE(state->generation) != READ_ONCE(rng_info->generation))) {
+ buffer -= ret;
+ goto retry_generation;
+ }
+ return ret;
+ }
+
+ chacha_init_consts(chacha_state);
+ memcpy(&chacha_state[4], state->key, CHACHA_KEY_SIZE);
+ memset(&chacha_state[12], 0, sizeof(u32) * 4);
+
+ while (len >= CHACHA_BLOCK_SIZE) {
+ chacha20_block(chacha_state, buffer);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+ buffer += CHACHA_BLOCK_SIZE;
+ len -= CHACHA_BLOCK_SIZE;
+ }
+
+ chacha20_block(chacha_state, state->key_batch);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+ chacha20_block(chacha_state, state->key_batch + CHACHA_BLOCK_SIZE);
+ state->pos = 0;
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ goto more_batch;
+}
+
+static __always_inline ssize_t
+__cvdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state)
+{
+ return __cvdso_getrandom_data(__arch_get_vdso_rng_data(), buffer, len, flags, opaque_state);
+}