[RFT,v5,4/7] fork: Add shadow stack support to clone3()
Commit Message
Unlike with the normal stack there is no API for configuring the shadow
stack for a new thread; instead the kernel dynamically allocates a new
shadow stack with the same size as the normal stack. This appears to be due
to the shadow stack series having been in development since before the more
extensible clone3() was added rather than anything more deliberate.
Add a parameter to clone3() specifying the size of a shadow stack for
the newly created process. If no shadow stack is specified then the
existing implicit allocation behaviour is maintained.
If the architecture does not support shadow stacks the shadow stack size
parameter must be zero; architectures that do support the feature are
expected to enforce the same requirement on individual systems that lack
shadow stack support.
Update the existing x86 implementation to pay attention to the newly added
arguments; in order to maintain compatibility we use the existing behaviour
if no shadow stack is specified. Minimal validation is done on the supplied
parameters, with detailed enforcement left to when the thread is executed.
Since we are now using more fields from kernel_clone_args we pass the
whole structure into the shadow stack code rather than individual fields.
At present this implementation does not consume the shadow stack token
atomically, as would be desirable.
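As an illustration, usage of the new interface might look like the
following sketch (not part of this patch; it assumes this patch is
applied on a kernel with x86 shadow stack support, and that a restore
token has been placed at the top of the shadow stack, for example by
map_shadow_stack() with SHADOW_STACK_SET_TOKEN):

    /*
     * Hypothetical clone3() caller using the new fields; error
     * handling and the full thread setup are omitted for brevity.
     */
    #define _GNU_SOURCE
    #include <asm/mman.h>       /* SHADOW_STACK_SET_TOKEN */
    #include <linux/sched.h>    /* struct clone_args, CLONE_* */
    #include <linux/types.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    #define SHSTK_SIZE 0x20000

    static pid_t spawn_with_shstk(void *stack, __u64 stack_size)
    {
            /* Ask the kernel for a shadow stack with a restore token
             * at the top; that this token format matches what the
             * patch consumes is an assumption in this sketch. */
            __u64 ss = syscall(__NR_map_shadow_stack, 0, SHSTK_SIZE,
                               SHADOW_STACK_SET_TOKEN);

            struct clone_args args = {
                    .flags             = CLONE_VM,
                    .stack             = (__u64)(unsigned long)stack,
                    .stack_size        = stack_size,
                    .shadow_stack      = ss,
                    .shadow_stack_size = SHSTK_SIZE,
            };

            return syscall(__NR_clone3, &args, sizeof(args));
    }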
Signed-off-by: Mark Brown <broonie@kernel.org>
---
arch/x86/include/asm/shstk.h | 11 ++++--
arch/x86/kernel/process.c | 2 +-
arch/x86/kernel/shstk.c | 91 +++++++++++++++++++++++++++++++++-----------
include/linux/sched/task.h | 2 +
include/uapi/linux/sched.h | 13 +++++--
kernel/fork.c | 61 +++++++++++++++++++++++------
6 files changed, 137 insertions(+), 43 deletions(-)
Comments
On Sat, 2024-02-03 at 00:05 +0000, Mark Brown wrote:
> +static bool shstk_consume_token(struct task_struct *tsk,
> + unsigned long addr)
> +{
> + /*
> + * SSP is aligned, so reserved bits and the mode bit are zero, just
> + * mark the token 64-bit.
> + */
> + u64 expected = (addr - SS_FRAME_SIZE) | BIT(0);
> + u64 val;
> +
> + /* This should really be an atomic cmpxchg. It is not. */
> + __get_user(val, (u64 __user *)addr);
> + if (val != expected)
> + return false;
> +
> + if (write_user_shstk_64((u64 __user *)addr, 0))
> + return false;
> +
> + return true;
> +}
So, don't we want to consume the token on the *new* task's MM, which
was already duplicated but still unmapped? In which case I think the
other arches would need to GUP regardless of the existence of shadow
stack atomic ops.
If so, my question is, can we GUP on the new MM at this point? There is
a lot going on in copy_process(). My first suspicion of complication is
the work on the child that happens in cgroup_post_fork().
I wonder about adding a shstk_post_fork() to make it easier to think
about and maintain, even if there are no issues today.
On Fri, 2024-02-09 at 12:18 -0800, Rick Edgecombe wrote:
>
> So, don't we want to consume the token on the *new* task's MM, which
> was already duplicated but still unmapped? In which case I think the
> other arches would need to GUP regardless of the existence of shadow
> stack atomic ops.
I mean for the !CLONE_VM case.
On Sat, 2024-02-03 at 00:05 +0000, Mark Brown wrote:
> + if (args->shadow_stack) {
> + addr = args->shadow_stack;
> + size = args->shadow_stack_size;
>
> - size = adjust_shstk_size(stack_size);
> - addr = alloc_shstk(0, size, 0, false);
> - if (IS_ERR_VALUE(addr))
> - return addr;
> + /* There should be a valid token at the top of the stack. */
> + if (!shstk_consume_token(tsk, addr + size - sizeof(u64)))
> + return (unsigned long)ERR_PTR(-EINVAL);
I think for this case, it needs:
shstk->base = 0;
shstk->size = 0;
To prevent trying to free the parent's shadow stack when the child
exits.
On Fri, Feb 09, 2024 at 08:18:11PM +0000, Edgecombe, Rick P wrote:
> On Sat, 2024-02-03 at 00:05 +0000, Mark Brown wrote:
> > + if (write_user_shstk_64((u64 __user *)addr, 0))
> > + return false;
> > +
> > + return true;
> > +}
> So, don't we want to consume the token on the *new* task's MM, which
> was already duplicated but still unmapped? In which case I think the
> other arches would need to GUP regardless of the existence of shadow
> stack atomic ops.
Yes, that would be better - if nothing else it allows reuse of the same
shadow stack for multiple !CLONE_VM clone3()s.
> I wonder about adding a shstk_post_fork() to make it easier to think
> about and maintain, even if there are no issues today.
I agree.
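To make the idea concrete, here is a rough sketch of such a hook,
consuming the token from the child's mm via GUP with an atomic
exchange (the name shstk_post_fork(), its call site and all details
below are assumptions for illustration, not part of this v5 patch):

    /* Hypothetical post-fork hook: claim the restore token in the
     * *child's* address space once copy_process() has set it up. */
    static int shstk_post_fork(struct task_struct *t,
                               const struct kernel_clone_args *args)
    {
            struct mm_struct *mm;
            struct page *page;
            unsigned long token_addr;
            u64 expected, *tokenp;
            int ret = -EINVAL;

            if (!args->shadow_stack)
                    return 0;

            token_addr = args->shadow_stack + args->shadow_stack_size -
                    sizeof(u64);
            expected = (token_addr - SS_FRAME_SIZE) | BIT(0);

            mm = get_task_mm(t);
            if (!mm)
                    return -EFAULT;

            mmap_read_lock(mm);
            /* Pin the page holding the token in the child's mm. */
            if (get_user_pages_remote(mm, token_addr, 1,
                                      FOLL_FORCE | FOLL_WRITE,
                                      &page, NULL) == 1) {
                    tokenp = kmap_local_page(page) +
                            offset_in_page(token_addr);
                    /* Atomically swap the token for zero to consume it. */
                    if (try_cmpxchg64(tokenp, &expected, 0))
                            ret = 0;
                    kunmap_local(tokenp);
                    set_page_dirty(page);
                    put_page(page);
            }
            mmap_read_unlock(mm);
            mmput(mm);

            return ret;
    }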
--- a/arch/x86/include/asm/shstk.h
+++ b/arch/x86/include/asm/shstk.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
struct task_struct;
+struct kernel_clone_args;
struct ksignal;
#ifdef CONFIG_X86_USER_SHADOW_STACK
@@ -16,8 +17,8 @@ struct thread_shstk {
long shstk_prctl(struct task_struct *task, int option, unsigned long arg2);
void reset_thread_features(void);
-unsigned long shstk_alloc_thread_stack(struct task_struct *p, unsigned long clone_flags,
- unsigned long stack_size);
+unsigned long shstk_alloc_thread_stack(struct task_struct *p,
+ const struct kernel_clone_args *args);
void shstk_free(struct task_struct *p);
int setup_signal_shadow_stack(struct ksignal *ksig);
int restore_signal_shadow_stack(void);
@@ -26,8 +27,10 @@ static inline long shstk_prctl(struct task_struct *task, int option,
unsigned long arg2) { return -EINVAL; }
static inline void reset_thread_features(void) {}
static inline unsigned long shstk_alloc_thread_stack(struct task_struct *p,
- unsigned long clone_flags,
- unsigned long stack_size) { return 0; }
+ const struct kernel_clone_args *args)
+{
+ return 0;
+}
static inline void shstk_free(struct task_struct *p) {}
static inline int setup_signal_shadow_stack(struct ksignal *ksig) { return 0; }
static inline int restore_signal_shadow_stack(void) { return 0; }
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -207,7 +207,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
* is disabled, new_ssp will remain 0, and fpu_clone() will know not to
* update it.
*/
- new_ssp = shstk_alloc_thread_stack(p, clone_flags, args->stack_size);
+ new_ssp = shstk_alloc_thread_stack(p, args);
if (IS_ERR_VALUE(new_ssp))
return PTR_ERR((void *)new_ssp);
--- a/arch/x86/kernel/shstk.c
+++ b/arch/x86/kernel/shstk.c
@@ -191,44 +191,89 @@ void reset_thread_features(void)
current->thread.features_locked = 0;
}
-unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long clone_flags,
- unsigned long stack_size)
+static bool shstk_consume_token(struct task_struct *tsk,
+ unsigned long addr)
+{
+ /*
+ * SSP is aligned, so reserved bits and the mode bit are zero, just
+ * mark the token 64-bit.
+ */
+ u64 expected = (addr - SS_FRAME_SIZE) | BIT(0);
+ u64 val;
+
+ /* This should really be an atomic cmpxchg. It is not. */
+ __get_user(val, (u64 __user *)addr);
+ if (val != expected)
+ return false;
+
+ if (write_user_shstk_64((u64 __user *)addr, 0))
+ return false;
+
+ return true;
+}
+
+unsigned long shstk_alloc_thread_stack(struct task_struct *tsk,
+ const struct kernel_clone_args *args)
{
struct thread_shstk *shstk = &tsk->thread.shstk;
+ unsigned long clone_flags = args->flags;
unsigned long addr, size;
/*
* If shadow stack is not enabled on the new thread, skip any
- * switch to a new shadow stack.
+ * implicit switch to a new shadow stack and reject attempts to
+ * explicitly specify one.
*/
- if (!features_enabled(ARCH_SHSTK_SHSTK))
- return 0;
+ if (!features_enabled(ARCH_SHSTK_SHSTK)) {
+ if (args->shadow_stack || args->shadow_stack_size)
+ return (unsigned long)ERR_PTR(-EINVAL);
- /*
- * For CLONE_VFORK the child will share the parents shadow stack.
- * Make sure to clear the internal tracking of the thread shadow
- * stack so the freeing logic run for child knows to leave it alone.
- */
- if (clone_flags & CLONE_VFORK) {
- shstk->base = 0;
- shstk->size = 0;
return 0;
}
/*
- * For !CLONE_VM the child will use a copy of the parents shadow
- * stack.
+ * If the user specified a shadow stack then do some basic
+ * validation of it and use it, otherwise fall back to allocating
+ * a default sized shadow stack unless the clone_flags indicate
+ * that no allocation is needed.
*/
- if (!(clone_flags & CLONE_VM))
- return 0;
+ if (args->shadow_stack) {
+ addr = args->shadow_stack;
+ size = args->shadow_stack_size;
- size = adjust_shstk_size(stack_size);
- addr = alloc_shstk(0, size, 0, false);
- if (IS_ERR_VALUE(addr))
- return addr;
+ /* There should be a valid token at the top of the stack. */
+ if (!shstk_consume_token(tsk, addr + size - sizeof(u64)))
+ return (unsigned long)ERR_PTR(-EINVAL);
+ } else {
+ /*
+ * For CLONE_VFORK the child will share the parent's
+ * shadow stack. Make sure to clear the internal
+ * tracking of the thread shadow stack so the freeing
+ * logic run for the child knows to leave it alone.
+ */
+ if (clone_flags & CLONE_VFORK) {
+ shstk->base = 0;
+ shstk->size = 0;
+ return 0;
+ }
- shstk->base = addr;
- shstk->size = size;
+ /*
+ * For !CLONE_VM the child will use a copy of the
+ * parent's shadow stack.
+ */
+ if (!(clone_flags & CLONE_VM))
+ return 0;
+
+ size = args->stack_size;
+ size = adjust_shstk_size(size);
+ addr = alloc_shstk(0, size, 0, false);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ /* We allocated the shadow stack, so we should free it. */
+ shstk->base = addr;
+ shstk->size = size;
+ }
return addr + size;
}
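For reference, the restore token that shstk_consume_token() above
looks for, on a shadow stack spanning [addr, addr + size), works out
as follows (derived from the code above; SS_FRAME_SIZE is 8 on
x86-64):

    /* The final 64-bit word of the shadow stack holds the token:
     * an aligned SSP value with bit 0 set to mark it as a 64-bit
     * token. */
    unsigned long token_addr = addr + size - sizeof(u64);
    u64 expected = (token_addr - SS_FRAME_SIZE) | BIT(0);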
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -43,6 +43,8 @@ struct kernel_clone_args {
void *fn_arg;
struct cgroup *cgrp;
struct css_set *cset;
+ unsigned long shadow_stack;
+ unsigned long shadow_stack_size;
};
/*
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -84,6 +84,10 @@
* kernel's limit of nested PID namespaces.
* @cgroup: If CLONE_INTO_CGROUP is specified set this to
* a file descriptor for the cgroup.
+ * @shadow_stack: Pointer to the memory allocated for the child
+ * shadow stack.
+ * @shadow_stack_size: The size of the shadow stack for
+ * the child process.
*
* The structure is versioned by size and thus extensible.
* New struct members must go at the end of the struct and
@@ -101,12 +105,15 @@ struct clone_args {
__aligned_u64 set_tid;
__aligned_u64 set_tid_size;
__aligned_u64 cgroup;
+ __aligned_u64 shadow_stack;
+ __aligned_u64 shadow_stack_size;
};
#endif
-#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
-#define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */
-#define CLONE_ARGS_SIZE_VER2 88 /* sizeof third published struct */
+#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
+#define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */
+#define CLONE_ARGS_SIZE_VER2 88 /* sizeof third published struct */
+#define CLONE_ARGS_SIZE_VER3 104 /* sizeof fourth published struct */
/*
* Scheduling policies
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -123,6 +123,11 @@
*/
#define MAX_THREADS FUTEX_TID_MASK
+/*
+ * Require that shadow stacks can store at least one element
+ */
+#define SHADOW_STACK_SIZE_MIN sizeof(void *)
+
/*
* Protected counters by write_lock_irq(&tasklist_lock)
*/
@@ -3062,7 +3067,9 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
CLONE_ARGS_SIZE_VER1);
BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) !=
CLONE_ARGS_SIZE_VER2);
- BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2);
+ BUILD_BUG_ON(offsetofend(struct clone_args, shadow_stack_size) !=
+ CLONE_ARGS_SIZE_VER3);
+ BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER3);
if (unlikely(usize > PAGE_SIZE))
return -E2BIG;
@@ -3095,16 +3102,18 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
return -EINVAL;
*kargs = (struct kernel_clone_args){
- .flags = args.flags,
- .pidfd = u64_to_user_ptr(args.pidfd),
- .child_tid = u64_to_user_ptr(args.child_tid),
- .parent_tid = u64_to_user_ptr(args.parent_tid),
- .exit_signal = args.exit_signal,
- .stack = args.stack,
- .stack_size = args.stack_size,
- .tls = args.tls,
- .set_tid_size = args.set_tid_size,
- .cgroup = args.cgroup,
+ .flags = args.flags,
+ .pidfd = u64_to_user_ptr(args.pidfd),
+ .child_tid = u64_to_user_ptr(args.child_tid),
+ .parent_tid = u64_to_user_ptr(args.parent_tid),
+ .exit_signal = args.exit_signal,
+ .stack = args.stack,
+ .stack_size = args.stack_size,
+ .tls = args.tls,
+ .set_tid_size = args.set_tid_size,
+ .cgroup = args.cgroup,
+ .shadow_stack = args.shadow_stack,
+ .shadow_stack_size = args.shadow_stack_size,
};
if (args.set_tid &&
@@ -3145,6 +3154,34 @@ static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
return true;
}
+/**
+ * clone3_shadow_stack_valid - check shadow stack arguments
+ * @kargs: kernel clone args
+ *
+ * Verify that shadow stacks are only enabled if supported.
+ */
+static inline bool clone3_shadow_stack_valid(struct kernel_clone_args *kargs)
+{
+ if (kargs->shadow_stack) {
+ if (!kargs->shadow_stack_size)
+ return false;
+
+ if (kargs->shadow_stack_size < SHADOW_STACK_SIZE_MIN)
+ return false;
+
+ if (kargs->shadow_stack_size > rlimit(RLIMIT_STACK))
+ return false;
+
+ /*
+ * The architecture must check support on the specific
+ * machine.
+ */
+ return IS_ENABLED(CONFIG_ARCH_HAS_USER_SHADOW_STACK);
+ } else {
+ return !kargs->shadow_stack_size;
+ }
+}
+
static bool clone3_args_valid(struct kernel_clone_args *kargs)
{
/* Verify that no unknown flags are passed along. */
@@ -3167,7 +3204,7 @@ static bool clone3_args_valid(struct kernel_clone_args *kargs)
kargs->exit_signal)
return false;
- if (!clone3_stack_valid(kargs))
+ if (!clone3_stack_valid(kargs) || !clone3_shadow_stack_valid(kargs))
return false;
return true;
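As a usage note, the new validation could be exercised from userspace
along these lines (a hypothetical selftest-style sketch, not part of
this patch):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <linux/sched.h>    /* struct clone_args */
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    int main(void)
    {
            /* A size without a shadow stack pointer must be rejected
             * with EINVAL by clone3_shadow_stack_valid() on every
             * architecture. */
            struct clone_args args = {
                    .shadow_stack      = 0,
                    .shadow_stack_size = 4096,
            };
            pid_t pid = syscall(__NR_clone3, &args, sizeof(args));

            return (pid == -1 && errno == EINVAL) ? 0 : 1;
    }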