[v7,2/3] random: introduce generic vDSO getrandom() implementation
Commit Message
Provide a generic C vDSO getrandom() implementation, which operates on
an opaque state returned by vgetrandom_alloc() and produces random bytes
the same way as getrandom(). This has the API signature:
ssize_t vgetrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state);
The return value and the first 3 arguments are the same as ordinary
getrandom(), while the last argument is a pointer to the opaque
allocated state. Were all four arguments passed to the getrandom()
syscall, nothing different would happen, and the functions would have
the exact same behavior.
The actual vDSO RNG algorithm implemented is the same one implemented by
drivers/char/random.c, using the same fast-erasure techniques as that.
Should the in-kernel implementation change, so too will the vDSO one.
It requires an implementation of ChaCha20 that does not use any stack,
in order to maintain forward secrecy, so this is left as an
architecture-specific fill-in. Stack-less ChaCha20 is an easy algorithm
to implement on a variety of architectures, so this shouldn't be too
onerous.
Initially, the state is keyless, and so the first call makes a
getrandom() syscall to generate that key, and then uses it for
subsequent calls. By keeping track of a generation counter, it knows
when its key is invalidated and it should fetch a new one using the
syscall. Later, more than just a generation counter might be used.
Since MADV_WIPEONFORK is set on the opaque state, the key and related
state is wiped during a fork(), so secrets don't roll over into new
processes, and the same state doesn't accidentally generate the same
random stream. The generation counter, as well, is always >0, so that
the 0 counter is a useful indication of a fork() or otherwise
uninitialized state.
If the kernel RNG is not yet initialized, then the vDSO always calls the
syscall, because that behavior cannot be emulated in userspace, but
fortunately that state is short lived and only during early boot. If it
has been initialized, then there is no need to inspect the `flags`
argument, because the behavior does not change post-initialization
regardless of the `flags` value.
Since the opaque state passed to it is mutated, vDSO getrandom() is not
reentrant, when used with the same opaque state, which libc should be
mindful of.
Together with the previous commit that introduces vgetrandom_alloc(),
this functionality is intended to be integrated into libc's thread
management. As an illustrative example, the following code might be used
to do the same outside of libc. All of the static functions are to be
considered implementation private, including the vgetrandom_alloc()
syscall wrapper, which generally shouldn't be exposed outside of libc,
with the non-static vgetrandom() function at the end being the exported
interface. The various pthread-isms are expected to be elided into libc
internals. This per-thread allocation scheme is very naive and does not
shrink; other implementations may choose to be more complex.
/*
 * Thin wrapper around the vgetrandom_alloc() syscall.
 *
 * @num:           in: hint for how many opaque states to allocate;
 *                 out: how many states the kernel actually allocated.
 * @size_per_each: out: the stride, in bytes, between consecutive states
 *                 in the returned mapping.
 * @flags:         reserved; must be zero for now.
 *
 * Returns the base address of the kernel-allocated mapping, or NULL on
 * failure (the syscall convention returns -1 on error).
 *
 * Fix: @num and @size_per_each are already pointers, so they must be
 * passed to syscall() directly. The previous code passed &num and
 * &size_per_each, i.e. unsigned int **, so the kernel would have read
 * and written the wrapper's local pointer variables instead of the
 * caller's counters.
 */
static void *vgetrandom_alloc(unsigned int *num, unsigned int *size_per_each, unsigned int flags)
{
	long ret = syscall(__NR_vgetrandom_alloc, num, size_per_each, flags);
	return ret == -1 ? NULL : (void *)ret;
}
/*
 * Process-wide pool of opaque vDSO getrandom() states, shared by all
 * threads and handed out one state per thread on demand.
 */
static struct {
/* Serializes all access to states/len/cap below. */
pthread_mutex_t lock;
/* Stack of pointers to currently-unused states inside kernel-allocated mappings. */
void **states;
/* len: number of states currently available; cap: allocated slots in states[]. */
size_t len, cap;
} grnd_allocator = {
.lock = PTHREAD_MUTEX_INITIALIZER
};
/*
 * Pop one opaque state from the shared pool for the calling thread.
 *
 * If the pool is empty, a fresh batch of states is requested from the
 * kernel via vgetrandom_alloc() and pushed onto the pool first. Returns
 * a pointer to a state, or NULL if neither the kernel allocation nor
 * the bookkeeping array growth succeeded.
 */
static void *vgetrandom_get_state(void)
{
	void *ret = NULL;

	pthread_mutex_lock(&grnd_allocator.lock);
	if (!grnd_allocator.len) {
		unsigned int size_per_each, num = 16; /* Just a hint. Could also be nr_cpus. */
		void *block, **resized;
		size_t grown_cap;

		/* Ask the kernel for a new batch of opaque states. */
		block = vgetrandom_alloc(&num, &size_per_each, 0);
		if (!block)
			goto unlock;

		/* Grow the pointer stack to hold the new batch. */
		grown_cap = grnd_allocator.cap + num;
		resized = reallocarray(grnd_allocator.states, grown_cap, sizeof(*grnd_allocator.states));
		if (!resized) {
			/* Give the mapping back; nowhere to track it. */
			munmap(block, num * size_per_each);
			goto unlock;
		}
		grnd_allocator.cap = grown_cap;
		grnd_allocator.states = resized;

		/* Slice the mapping into individual states, size_per_each apart. */
		for (size_t i = 0; i < num; ++i, block += size_per_each)
			grnd_allocator.states[i] = block;
		grnd_allocator.len = num;
	}
	ret = grnd_allocator.states[--grnd_allocator.len];
unlock:
	pthread_mutex_unlock(&grnd_allocator.lock);
	return ret;
}
/*
 * Return a state to the shared pool so another thread can reuse it.
 * Also installed as the pthread TLS destructor, so it tolerates NULL.
 */
static void vgetrandom_put_state(void *state)
{
	if (state == NULL)
		return;

	pthread_mutex_lock(&grnd_allocator.lock);
	grnd_allocator.states[grnd_allocator.len] = state;
	++grnd_allocator.len;
	pthread_mutex_unlock(&grnd_allocator.lock);
}
/*
 * Process-wide vDSO getrandom() context, initialized once on first use.
 */
static struct {
/* Resolved __vdso_getrandom function pointer; NULL means fall back to the syscall. */
ssize_t(*fn)(void *buf, size_t len, unsigned long flags, void *state);
/* TLS key whose per-thread value is this thread's opaque state. */
pthread_key_t key;
/* Guards one-time setup of key and fn via pthread_once(). */
pthread_once_t initialized;
} grnd_ctx = {
.initialized = PTHREAD_ONCE_INIT
};
/*
 * One-time setup, run under pthread_once(): create the TLS key with
 * vgetrandom_put_state() as destructor (so a dying thread's state is
 * recycled into the pool), then resolve the vDSO symbol. If key
 * creation fails, grnd_ctx.fn stays NULL and callers fall back to the
 * plain getrandom() syscall.
 */
static void vgetrandom_init(void)
{
	if (pthread_key_create(&grnd_ctx.key, vgetrandom_put_state) == 0)
		grnd_ctx.fn = __vdsosym("LINUX_2.6", "__vdso_getrandom");
}
/*
 * Exported getrandom() replacement: same contract as getrandom(2), but
 * serviced by the vDSO fast path whenever possible.
 *
 * Falls back to the real syscall when the vDSO symbol is unavailable,
 * or when no per-thread opaque state could be obtained or stored.
 */
ssize_t vgetrandom(void *buf, size_t len, unsigned long flags)
{
	void *state;

	pthread_once(&grnd_ctx.initialized, vgetrandom_init);
	if (grnd_ctx.fn == NULL)
		return getrandom(buf, len, flags);

	state = pthread_getspecific(grnd_ctx.key);
	if (state == NULL) {
		/* First call on this thread: acquire a state and stash it in TLS. */
		state = vgetrandom_get_state();
		if (pthread_setspecific(grnd_ctx.key, state)) {
			/* TLS store failed; return the state so it isn't leaked. */
			vgetrandom_put_state(state);
			state = NULL;
		}
		if (state == NULL)
			return getrandom(buf, len, flags);
	}

	return grnd_ctx.fn(buf, len, flags, state);
}
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
MAINTAINERS | 1 +
drivers/char/random.c | 9 ++++
include/vdso/datapage.h | 6 +++
lib/vdso/Kconfig | 5 ++
lib/vdso/getrandom.c | 114 ++++++++++++++++++++++++++++++++++++++++
5 files changed, 135 insertions(+)
create mode 100644 lib/vdso/getrandom.c
Comments
Jason!
On Thu, Nov 24 2022 at 17:55, Jason A. Donenfeld wrote:
>
> Together with the previous commit that introduces vgetrandom_alloc(),
Together with the previous commit is just a complete useless
information.
> this functionality is intended to be integrated into libc's thread
> management.
vdso_getrandom() and sys_vgetrandom_alloc() provide ..... which is
intended to be integrated ....
> diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
> index 73eb622e7663..cbacfd923a5c 100644
> --- a/include/vdso/datapage.h
> +++ b/include/vdso/datapage.h
> @@ -109,6 +109,11 @@ struct vdso_data {
> struct arch_vdso_data arch_data;
> };
>
> +struct vdso_rng_data {
> + unsigned long generation;
> + bool is_ready;
> +};
Please follow the coding style in this header:
- make the struct definition tabular aligned
- provide kernel doc which explains the struct members
> +config HAVE_VDSO_GETRANDOM
> + bool
> + help
> + Selected by architectures that support vDSO getrandom().
See ?
> endif
> diff --git a/lib/vdso/getrandom.c b/lib/vdso/getrandom.c
> new file mode 100644
> index 000000000000..2c4ef5ef212c
> --- /dev/null
> +++ b/lib/vdso/getrandom.c
> @@ -0,0 +1,114 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (C) 2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
> + */
> +
> +#include <linux/kernel.h>
> +#include <linux/atomic.h>
> +#include <linux/fs.h>
No. This is VDSO and builds a userspace library. You cannot include
random kernel headers, which in turn include the world and some more. It
might build today, but any legitimate change to fs.h or one of the
headers included by it will make it explode.
I just fixed up some other instance which included world and I have zero
interest to get more of this "works by chance" things.
If you really need anything from fs.h then please isolate it out into a
separate header file which is included by fs.h and here.
> +#include <vdso/datapage.h>
> +#include <asm/vdso/getrandom.h>
> +#include <asm/vdso/vsyscall.h>
> +#include "getrandom.h"
> +
> +static void memcpy_and_zero(void *dst, void *src, size_t len)
> +{
> +#define CASCADE(type) \
> + while (len >= sizeof(type)) { \
> + __put_unaligned_t(type, __get_unaligned_t(type, src), dst); \
> + __put_unaligned_t(type, 0, src); \
> + dst += sizeof(type); \
> + src += sizeof(type); \
> + len -= sizeof(type); \
> + }
No. Defines inside of functions are a horrible habit. This is not the
obfuscated C-code contest.
> +#if IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
> +#if BITS_PER_LONG == 64
> + CASCADE(u64);
> +#endif
> + CASCADE(u32);
> + CASCADE(u16);
> +#endif
> + CASCADE(u8);
> +#undef CASCADE
This is equally unreadable. I had to reread it 4 times to grok it.
#define MEMCPY_AND_ZERO_SRC(type, dst, src, len) \
while (len >= sizeof(type)) { \
__put_unaligned_t(type, __get_unaligned_t(type, src), dst); \
__put_unaligned_t(type, 0, src); \
dst += sizeof(type); \
src += sizeof(type); \
len -= sizeof(type); \
}
static void memcpy_and_zero_src(void *dst, void *src, size_t len)
{
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
if (IS_ENABLED(CONFIG_64BIT))
MEMCPY_AND_ZERO_SRC(u64, dst, src, len);
MEMCPY_AND_ZERO_SRC(u32, dst, src, len);
MEMCPY_AND_ZERO_SRC(u16, dst, src, len);
}
MEMCPY_AND_ZERO_SRC(u8, dst, src, len);
}
Can you spot the difference in readability and formatting?
> +
> +static __always_inline ssize_t
> +__cvdso_getrandom_data(const struct vdso_rng_data *rng_info, void *buffer, size_t len,
> + unsigned int flags, void *opaque_state)
Lacks kernel-doc explaining the arguments and the functionality.
> +{
> + ssize_t ret = min_t(size_t, MAX_RW_COUNT, len);
> + struct vgetrandom_state *state = opaque_state;
> + size_t batch_len, nblocks, orig_len = len;
> + unsigned long current_generation;
> + void *orig_buffer = buffer;
> + u32 counter[2] = { 0 };
> +
> + /*
> + * If the kernel isn't yet initialized, then the various flags might have some effect
> + * that we can't emulate in userspace, so use the syscall. Otherwise, the flags have
> + * no effect, and can continue.
Sorry, this is word salad which raises a -ENOPARSE here.
@rng_info is a user supplied pointer, but there is zero information how
rng_info->is_ready is set to true because obviously getrandom_syscall()
does not know about it.
I know you described it in the changelog with an example to some extent,
but that does not help the casual reader of this code at all.
> + */
> + if (unlikely(!rng_info->is_ready))
> + return getrandom_syscall(orig_buffer, orig_len, flags);
What's the point of using orig_buffer and orig_len at this place? I can
understand it below, but here it does not make sense.
> +
> + if (unlikely(!len))
> + return 0;
Why go into the kernel above when len == 0?
> +
> +retry_generation:
> + current_generation = READ_ONCE(rng_info->generation);
READ_ONCE() and WRITE_ONCE() require comments explaining why they are
used and what the actual counterpart of each operation is.
> + if (unlikely(state->generation != current_generation)) {
> + /* Write the generation before filling the key, in case there's a fork before. */
> + WRITE_ONCE(state->generation, current_generation);
> + /* If the generation is wrong, the kernel has reseeded, so we should too. */
> + if (getrandom_syscall(state->key, sizeof(state->key), 0) != sizeof(state->key))
> + return getrandom_syscall(orig_buffer, orig_len, flags);
> + /* Set state->pos so that the batch is considered emptied. */
> + state->pos = sizeof(state->batch);
Can you please add newlines into this so the various steps are visually
separated?
if (unlikely(state->generation != current_generation)) {
/* Write the generation before filling the key, in case there's a fork before. */
WRITE_ONCE(state->generation, current_generation);
/* If the generation is wrong, the kernel has reseeded, so we should too. */
if (getrandom_syscall(state->key, sizeof(state->key), 0) != sizeof(state->key))
return getrandom_syscall(orig_buffer, orig_len, flags);
/* Set state->pos so that the batch is considered emptied. */
state->pos = sizeof(state->batch);
}
Again: Can you spot the difference? Please fix that up all over the place.
Now let me comment on the content of this:
> + if (unlikely(state->generation != current_generation)) {
> + /* Write the generation before filling the key, in case there's a fork before. */
There is no space restriction which requires to write comments in a way
that they need crystal balls to decode.
/*
* Update @state->generation before invoking the syscall,
* which fills the key, to protect against a fork FOR
* WHATEVER EXPLICIT REASON
*/
This might be completely obvious to you today, but it's not obvious to
me or anyone else and I'm sure that you would curse these comments six
month down the road yourself.
> + WRITE_ONCE(state->generation, current_generation);
> + /* If the generation is wrong, the kernel has reseeded, so we should too. */
Which generation is wrong? Your's or mine? Please spell things
out. There is enough space to do so.
Also please refrain from 'we should ...'. 'We' has no place here.
I know it is a common habit to impersonate code and code execution, but
it's a horrible habit. Aside of being non-factual there are people from
other cultures who really have a hard time to understand this.
Neither is 'should' a proper term here. 'should' is not mandatory.
"If there is a state generation mismatch, which means the kernel has
reseeded the random generator, then it is _required_ to reseed the
vdso buffers too."
Sorry, I really fail to understand this sloppy wording coming from
someone who educates everyone else about the importance of correctness.
If you can't be bothered to express yourself in correct terms
consistently then why do you expect that anyone else understands
randomness or cryptography correctly?
I really appreciate your efforts to make all of this more accessible to
the average user, but please get your act together.
> + if (getrandom_syscall(state->key, sizeof(state->key), 0) != sizeof(state->key))
> + return getrandom_syscall(orig_buffer, orig_len, flags);
This lacks an explanation why this might invoke the syscall twice.
> + /* Set state->pos so that the batch is considered emptied. */
Considered? There is nothing to consider here, right?
/*
* Advance state->pos beyond the end of the batch buffer to
* signal that the batch needs to be refilled.
*/
Hmm?
> + state->pos = sizeof(state->batch);
> + }
> +
> + len = ret;
> +more_batch:
> + /* First use whatever is left from the last call. */
Last call of what?
> + batch_len = min_t(size_t, sizeof(state->batch) - state->pos, len);
Where is the sanity check for state->pos <= sizeof(state->batch)?
> + if (batch_len) {
> + /* Zero out bytes as they're copied out, to preserve forward secrecy. */
Please name the function so it is self explaining and add an
comprehensive comment to it so you can spare the comment here.
> + memcpy_and_zero(buffer, state->batch + state->pos, batch_len);
> + state->pos += batch_len;
> + buffer += batch_len;
> + len -= batch_len;
buffer and state->pos could be updated by that non-fail copy function to
aid the compiler, but I might be wrong about the ability of compilers to
optimize code like that.
> + }
> + if (!len) {
> + /*
> + * Since rng_info->generation will never be 0, we re-read state->generation,
s/we// ....
> + * rather than using the local current_generation variable, to learn whether
> + * we forked. Primarily, though, this indicates whether the rng itself has
> + * reseeded, in which case we should generate a new key and start over.
> + */
> + if (unlikely(READ_ONCE(state->generation) != READ_ONCE(rng_info->generation))) {
> + buffer = orig_buffer;
> + goto retry_generation;
> + }
> + return ret;
> + }
> +
> + /* Generate blocks of rng output directly into the buffer while there's enough left. */
> + nblocks = len / CHACHA_BLOCK_SIZE;
> + if (nblocks) {
> + __arch_chacha20_blocks_nostack(buffer, state->key, counter, nblocks);
> + buffer += nblocks * CHACHA_BLOCK_SIZE;
> + len -= nblocks * CHACHA_BLOCK_SIZE;
> + }
> +
> + /* Refill the batch and then overwrite the key, in order to preserve forward secrecy. */
> + BUILD_BUG_ON(sizeof(state->batch_key) % CHACHA_BLOCK_SIZE != 0);
Does this build bug really need to be glued in between the comment
explaining the function call and the function call itself?
> + __arch_chacha20_blocks_nostack(state->batch_key, state->key, counter,
> + sizeof(state->batch_key) / CHACHA_BLOCK_SIZE);
> + state->pos = 0;
This reset of state->pos also lacks a comment for the casual reader...
Thanks,
tglx
> Jason!
Thomas!
On Fri, Nov 25, 2022 at 11:39:15PM +0100, Thomas Gleixner wrote:
> > +struct vdso_rng_data {
> > + unsigned long generation;
> > + bool is_ready;
> > +};
>
> Please follow the coding style in this header:
>
> - make the struct definition tabular aligned
> - provide kernel doc which explains the struct members
Will do.
> > +
> > +#include <linux/kernel.h>
> > +#include <linux/atomic.h>
> > +#include <linux/fs.h>
>
> No. This is VDSO and builds a userspace library. You cannot include
> random kernel headers, which in turn include the world and some more. It
> might build today, but any legitimate change to fs.h or one of the
> headers included by it will make it explode.
>
> I just fixed up some other instance which included world and I have zero
> interest to get more of this "works by chance" things.
>
> If you really need anything from fs.h then please isolate it out into a
> separate header file which is included by fs.h and here.
Hm. I need MAX_RW_COUNT from linux/fs.h. I could just hardcode `(INT_MAX
& PAGE_MASK)`, though, if you'd prefer, and leave a comment. I'll do
that. Or I could move MAX_RW_COUNT into linux/kernel.h? But maybe that's
undesirable.
So:
ssize_t ret = min_t(size_t, INT_MAX & PAGE_MASK /* = MAX_RW_COUNT */, len);
I'll do that, if it's okay with you. Or tell me if you want me to
instead move MAX_RW_COUNT into linux/kernel.h.
Also, if I remove linux/fs.h, I need to include linux/time.h in its
place, because vdso/datapage.h implicitly depends on it. Alternatively,
I could add linux/time.h to vdso/datapage.h, but I don't want to touch
too many files uninvited.
> MEMCPY_AND_ZERO_SRC(u8, dst, src, len);
> }
>
> Can you spot the difference in readability and formatting?
Nice suggestion, will do.
> > +
> > +static __always_inline ssize_t
> > +__cvdso_getrandom_data(const struct vdso_rng_data *rng_info, void *buffer, size_t len,
> > + unsigned int flags, void *opaque_state)
>
> Lacks kernel-doc explaining the arguments and the functionality.
I'll add one.
> > +
> > + if (unlikely(!len))
> > + return 0;
>
> Why go into the kernel above when len == 0?
If the kernel's RNG isn't ready, then the behavior here can't easily be
emulated by userspace alone. For the len == 0 case, if flags == 0, then
the syscall will block until it's ready, for example, and that blocking
behavior is something we want to retain.
However, I recognize that the fact you had to ask this question is
simply indicative of the fact that this function is under-documented or
otherwise incomprehensible, per your other comments, so I'll try to make
this more clear for v8.
> Sorry, this is word salad which raises a -ENOPARSE here.
> Sorry, I really fail to understand this sloppy wording coming from
> someone who educates everyone else about the importance of correctness.
> If you can't be bothered to express yourself in correct terms
> consistently then why do you expect that anyone else understands
> randomness or cryptography correctly?
> I really appreciate your efforts to make all of this more accessible to
> the average user, but please get your act together.
Your individual specific points as well as the overarching one are well
taken, and I'll overhaul all of the documentation for v8, in hopes of
making this function a lot more clear.
> > + batch_len = min_t(size_t, sizeof(state->batch) - state->pos, len);
>
> Where is the sanity check for state->pos <= sizeof(state->batch)?
That condition cannot happen. "Does the compiler or some other checker
prove that as part of the development cycle?" No, unfortunately. So what
would you like to do here? Per Linus' email on an unrelated topic [1],
"We don't test for things that can't happen." And there's no
WARN_ON/BUG_ON primitive that'd be wise to use here -- nobody wants to
emit a ud2 into vDSO code I assume. So what would you like? For me to
add that check and bail out of the function if it's wrong, even if that
should normally never happen? Or adhere to the [1] more strictly and do
nothing, as is the case now? I'll do what you want here.
And please don't tell me, "if you even have to ask what to do, you
shouldn't be anywhere near... bla bla bla", because I *am* cognisant of
defense coding, yet at the same time I'm trying to respect kernel norms,
and balancing the two tends to be more subjective than, say, the results
of a verifier. So, anyway, let me know what behavior you want here --
the sanity check-->fallback path, or doing nothing, or something else.
[1] https://lore.kernel.org/all/CAHk-=wheoU5mkht1xk_4Tyw78oa-8iGvGE9nBdUmGqCykgo1xw@mail.gmail.com/
>
> > + if (batch_len) {
> > + /* Zero out bytes as they're copied out, to preserve forward secrecy. */
>
> Please name the function so it is self explaining and add an
> comprehensive comment to it so you can spare the comment here.
I'll take your memcpy_and_zero_*src* suggestion from earlier.
> > + memcpy_and_zero(buffer, state->batch + state->pos, batch_len);
> > + state->pos += batch_len;
> > + buffer += batch_len;
> > + len -= batch_len;
>
> buffer and state->pos could be updated by that non-fail copy function to
> aid the compiler, but I might be wrong about the ability of compilers to
> optimize code like that.
memcpy_and_zero(_src) is inlined, and the codegen looks okay enough I
think.
> > + /* Refill the batch and then overwrite the key, in order to preserve forward secrecy. */
> > + BUILD_BUG_ON(sizeof(state->batch_key) % CHACHA_BLOCK_SIZE != 0);
>
> Does this build bug really need to be glued in between the comment
> explaining the function call and the function call itself?
I thought of these two lines as sort of one thing together. But I'll
reverse the order of the comment and the BUILD_BUG_ON, per your
suggestion.
Thanks again for the extremely thorough review.
Jason
Jason!
On Sun, Nov 27 2022 at 22:52, Jason A. Donenfeld wrote:
> On Fri, Nov 25, 2022 at 11:39:15PM +0100, Thomas Gleixner wrote:
>> If you really need anything from fs.h then please isolate it out into a
>> separate header file which is included by fs.h and here.
>
> Hm. I need MAX_RW_COUNT from linux/fs.h. I could just hardcode `(INT_MAX
> & PAGE_MASK)`, though, if you'd prefer, and leave a comment. I'll do
> that. Or I could move MAX_RW_COUNT into linux/kernel.h? But maybe that's
> undesirable.
>
> So:
>
> ssize_t ret = min_t(size_t, INT_MAX & PAGE_MASK /* = MAX_RW_COUNT */, len);
>
> I'll do that, if it's okay with you. Or tell me if you want me to
> instead move MAX_RW_COUNT into linux/kernel.h.
>
> Also, if I remove linux/fs.h, I need to include linux/time.h in its
> place, because vdso/datapage.h implicitly depends on it. Alternatively,
> I could add linux/time.h to vdso/datapage.h, but I don't want to touch
> too many files uninvited.
Actually the minimal includes are those:
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -8,8 +8,9 @@
* 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
* sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
*/
-#include <linux/time.h>
+#include <linux/cache.h>
#include <linux/kernel.h>
+#include <linux/time64.h>
#include <linux/types.h>
#include "../../../../lib/vdso/gettimeofday.c"
>> > + batch_len = min_t(size_t, sizeof(state->batch) - state->pos, len);
>>
>> Where is the sanity check for state->pos <= sizeof(state->batch)?
>
> That condition cannot happen. "Does the compiler or some other checker
> prove that as part of the development cycle?" No, unfortunately. So what
> would you like to do here? Per Linus' email on an unrelated topic [1],
> "We don't test for things that can't happen." And there's no
> WARN_ON/BUG_ON primitive that'd be wise to use here -- nobody wants to
> emit a ud2 into vDSO code I assume. So what would you like? For me to
> add that check and bail out of the function if it's wrong, even if that
> should normally never happen? Or adhere to the [1] more strictly and do
> nothing, as is the case now? I'll do what you want here.
I think we can do without any further checks. If the callsite fiddles
with state then the resulting memcpy will go into lala land and the
process can keep the pieces.
Thanks,
tglx
@@ -17287,6 +17287,7 @@ T: git https://git.kernel.org/pub/scm/linux/kernel/git/crng/random.git
S: Maintained
F: drivers/char/random.c
F: drivers/virt/vmgenid.c
+F: lib/vdso/getrandom.c
F: lib/vdso/getrandom.h
RAPIDIO SUBSYSTEM
@@ -61,6 +61,9 @@
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>
+#ifdef CONFIG_HAVE_VDSO_GETRANDOM
+#include <vdso/datapage.h>
+#endif
#include "../../lib/vdso/getrandom.h"
/*********************************************************************
@@ -328,6 +331,9 @@ static void crng_reseed(struct work_struct *work)
if (next_gen == ULONG_MAX)
++next_gen;
WRITE_ONCE(base_crng.generation, next_gen);
+#ifdef CONFIG_HAVE_VDSO_GETRANDOM
+ smp_store_release(&_vdso_rng_data.generation, next_gen + 1);
+#endif
if (!static_branch_likely(&crng_is_ready))
crng_init = CRNG_READY;
spin_unlock_irqrestore(&base_crng.lock, flags);
@@ -778,6 +784,9 @@ static void __cold _credit_init_bits(size_t bits)
if (static_key_initialized)
execute_in_process_context(crng_set_ready, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
+#ifdef CONFIG_HAVE_VDSO_GETRANDOM
+ smp_store_release(&_vdso_rng_data.is_ready, true);
+#endif
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
@@ -109,6 +109,11 @@ struct vdso_data {
struct arch_vdso_data arch_data;
};
+struct vdso_rng_data {
+ unsigned long generation;
+ bool is_ready;
+};
+
/*
* We use the hidden visibility to prevent the compiler from generating a GOT
* relocation. Not only is going through a GOT useless (the entry couldn't and
@@ -120,6 +125,7 @@ struct vdso_data {
*/
extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden")));
extern struct vdso_data _timens_data[CS_BASES] __attribute__((visibility("hidden")));
+extern struct vdso_rng_data _vdso_rng_data __attribute__((visibility("hidden")));
/*
* The generic vDSO implementation requires that gettimeofday.h
@@ -30,4 +30,9 @@ config GENERIC_VDSO_TIME_NS
Selected by architectures which support time namespaces in the
VDSO
+config HAVE_VDSO_GETRANDOM
+ bool
+ help
+ Selected by architectures that support vDSO getrandom().
+
endif
new file mode 100644
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/fs.h>
+#include <vdso/datapage.h>
+#include <asm/vdso/getrandom.h>
+#include <asm/vdso/vsyscall.h>
+#include "getrandom.h"
+
+static void memcpy_and_zero(void *dst, void *src, size_t len)
+{
+#define CASCADE(type) \
+ while (len >= sizeof(type)) { \
+ __put_unaligned_t(type, __get_unaligned_t(type, src), dst); \
+ __put_unaligned_t(type, 0, src); \
+ dst += sizeof(type); \
+ src += sizeof(type); \
+ len -= sizeof(type); \
+ }
+#if IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+#if BITS_PER_LONG == 64
+ CASCADE(u64);
+#endif
+ CASCADE(u32);
+ CASCADE(u16);
+#endif
+ CASCADE(u8);
+#undef CASCADE
+}
+
+static __always_inline ssize_t
+__cvdso_getrandom_data(const struct vdso_rng_data *rng_info, void *buffer, size_t len,
+ unsigned int flags, void *opaque_state)
+{
+ ssize_t ret = min_t(size_t, MAX_RW_COUNT, len);
+ struct vgetrandom_state *state = opaque_state;
+ size_t batch_len, nblocks, orig_len = len;
+ unsigned long current_generation;
+ void *orig_buffer = buffer;
+ u32 counter[2] = { 0 };
+
+ /*
+ * If the kernel isn't yet initialized, then the various flags might have some effect
+ * that we can't emulate in userspace, so use the syscall. Otherwise, the flags have
+ * no effect, and can continue.
+ */
+ if (unlikely(!rng_info->is_ready))
+ return getrandom_syscall(orig_buffer, orig_len, flags);
+
+ if (unlikely(!len))
+ return 0;
+
+retry_generation:
+ current_generation = READ_ONCE(rng_info->generation);
+ if (unlikely(state->generation != current_generation)) {
+ /* Write the generation before filling the key, in case there's a fork before. */
+ WRITE_ONCE(state->generation, current_generation);
+ /* If the generation is wrong, the kernel has reseeded, so we should too. */
+ if (getrandom_syscall(state->key, sizeof(state->key), 0) != sizeof(state->key))
+ return getrandom_syscall(orig_buffer, orig_len, flags);
+ /* Set state->pos so that the batch is considered emptied. */
+ state->pos = sizeof(state->batch);
+ }
+
+ len = ret;
+more_batch:
+ /* First use whatever is left from the last call. */
+ batch_len = min_t(size_t, sizeof(state->batch) - state->pos, len);
+ if (batch_len) {
+ /* Zero out bytes as they're copied out, to preserve forward secrecy. */
+ memcpy_and_zero(buffer, state->batch + state->pos, batch_len);
+ state->pos += batch_len;
+ buffer += batch_len;
+ len -= batch_len;
+ }
+ if (!len) {
+ /*
+ * Since rng_info->generation will never be 0, we re-read state->generation,
+ * rather than using the local current_generation variable, to learn whether
+ * we forked. Primarily, though, this indicates whether the rng itself has
+ * reseeded, in which case we should generate a new key and start over.
+ */
+ if (unlikely(READ_ONCE(state->generation) != READ_ONCE(rng_info->generation))) {
+ buffer = orig_buffer;
+ goto retry_generation;
+ }
+ return ret;
+ }
+
+ /* Generate blocks of rng output directly into the buffer while there's enough left. */
+ nblocks = len / CHACHA_BLOCK_SIZE;
+ if (nblocks) {
+ __arch_chacha20_blocks_nostack(buffer, state->key, counter, nblocks);
+ buffer += nblocks * CHACHA_BLOCK_SIZE;
+ len -= nblocks * CHACHA_BLOCK_SIZE;
+ }
+
+ /* Refill the batch and then overwrite the key, in order to preserve forward secrecy. */
+ BUILD_BUG_ON(sizeof(state->batch_key) % CHACHA_BLOCK_SIZE != 0);
+ __arch_chacha20_blocks_nostack(state->batch_key, state->key, counter,
+ sizeof(state->batch_key) / CHACHA_BLOCK_SIZE);
+ state->pos = 0;
+ goto more_batch;
+}
+
+static __always_inline ssize_t
+__cvdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state)
+{
+ return __cvdso_getrandom_data(__arch_get_vdso_rng_data(), buffer, len, flags, opaque_state);
+}