[v3,2/2] io_uring: Add KASAN support for alloc_caches

Message ID 20230223164353.2839177-3-leitao@debian.org
State New
Series io_uring: Add KASAN support for alloc caches

Commit Message

Breno Leitao Feb. 23, 2023, 4:43 p.m. UTC
Add support for KASAN in the alloc_caches (apoll and netmsg_cache).
Thus, if something touches an object while it sits unused in a cache,
KASAN will raise a warning/exception.

The object is poisoned when it is put into the cache, and unpoisoned
when it is retrieved from the cache or freed.
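
For illustration only (not part of the patch), roughly what this catches,
using the helpers changed below; "ctx" is an assumed struct io_ring_ctx
pointer and the final write is a deliberate bug:

	struct io_cache_entry *entry = io_alloc_cache_get(&ctx->netmsg_cache);

	if (entry) {
		/* kasan_unpoison_range() ran inside the get: entry is usable */
		if (io_alloc_cache_put(&ctx->netmsg_cache, entry)) {
			/* kasan_slab_free_mempool() poisoned it on the put... */
			((char *)entry)[0] = 0;	/* ...so KASAN reports this */
		}
	}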

Signed-off-by: Breno Leitao <leitao@debian.org>
---
 include/linux/io_uring_types.h | 1 +
 io_uring/alloc_cache.h         | 6 +++++-
 io_uring/io_uring.c            | 4 ++--
 io_uring/net.h                 | 5 ++++-
 4 files changed, 12 insertions(+), 4 deletions(-)
  

Comments

Gabriel Krisman Bertazi Feb. 23, 2023, 7:09 p.m. UTC | #1
Breno Leitao <leitao@debian.org> writes:

> Add support for KASAN in the alloc_caches (apoll and netmsg_cache).
> Thus, if something touches the unused caches, it will raise a KASAN
> warning/exception.
>
> It poisons the object when the object is put to the cache, and unpoisons
> it when the object is gotten or freed.
>
> Signed-off-by: Breno Leitao <leitao@debian.org>
> ---
>  include/linux/io_uring_types.h | 1 +
>  io_uring/alloc_cache.h         | 6 +++++-
>  io_uring/io_uring.c            | 4 ++--
>  io_uring/net.h                 | 5 ++++-
>  4 files changed, 12 insertions(+), 4 deletions(-)
>
> diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
> index efa66b6c32c9..35ebcfb46047 100644
> --- a/include/linux/io_uring_types.h
> +++ b/include/linux/io_uring_types.h
> @@ -190,6 +190,7 @@ struct io_ev_fd {
>  struct io_alloc_cache {
>  	struct io_wq_work_node	list;
>  	unsigned int		nr_cached;
> +	size_t			elem_size;
>  };
>  
>  struct io_ring_ctx {
> diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
> index 301855e94309..3aba7b356320 100644
> --- a/io_uring/alloc_cache.h
> +++ b/io_uring/alloc_cache.h
> @@ -16,6 +16,8 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
>  	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
>  		cache->nr_cached++;
>  		wq_stack_add_head(&entry->node, &cache->list);
> +		/* KASAN poisons object */
> +		kasan_slab_free_mempool(entry);
>  		return true;
>  	}
>  	return false;
> @@ -27,6 +29,7 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
>  		struct io_cache_entry *entry;
>  
>  		entry = container_of(cache->list.next, struct io_cache_entry, node);
> +		kasan_unpoison_range(entry, cache->elem_size);

I kind of worry that there is no type checking at the point where we
unpoison a constant-size range; it seems easy to misuse the API.  But it
does look much better now with elem_size cached inside io_alloc_cache.
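
To make that concrete, a sketch of the kind of misuse nothing would flag
at build time ("struct bigger_thing" and its fields are invented for the
example, and "ctx" is assumed as above):

	struct bigger_thing {
		struct io_cache_entry	cache;
		char			extra[256];
	};

	struct io_cache_entry *entry = io_alloc_cache_get(&ctx->netmsg_cache);

	if (entry) {
		struct bigger_thing *bt =
			container_of(entry, struct bigger_thing, cache);

		/*
		 * Compiles without complaint, but only cache->elem_size bytes
		 * (sizeof(struct io_async_msghdr) here) were unpoisoned, and
		 * the allocation behind the entry may not even be that big.
		 */
		bt->extra[0] = 0;
	}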

>  
> -#if defined(CONFIG_NET)
>  struct io_async_msghdr {
> +#if defined(CONFIG_NET)
>  	union {
>  		struct iovec		fast_iov[UIO_FASTIOV];
>  		struct {
> @@ -22,8 +22,11 @@ struct io_async_msghdr {
>  	struct sockaddr __user		*uaddr;
>  	struct msghdr			msg;
>  	struct sockaddr_storage		addr;
> +#endif
>  };
>  
> +#if defined(CONFIG_NET)
> +

Nit, but you could have added an empty definition in the #else section
that already exists in the file, or just guarded the caching code
entirely when CONFIG_NET=n.
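
For reference, one shape that suggestion could take (a sketch, not what
the patch does): keep the full definition under CONFIG_NET and give
CONFIG_NET=n builds an empty stand-in, so the sizeof() used in
io_ring_ctx_alloc() still compiles:

#if defined(CONFIG_NET)
struct io_async_msghdr {
	/* full definition as in the patch */
};
#else
struct io_async_msghdr {
};
#endif

The cache never sees any entries without CONFIG_NET, so the empty
stand-in only has to keep the build happy.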

Just nits, and overall it is good to have this KASAN support!

Reviewed-by: Gabriel Krisman Bertazi <krisman@suse.de>
  

Patch

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index efa66b6c32c9..35ebcfb46047 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -190,6 +190,7 @@ struct io_ev_fd {
 struct io_alloc_cache {
 	struct io_wq_work_node	list;
 	unsigned int		nr_cached;
+	size_t			elem_size;
 };
 
 struct io_ring_ctx {
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index 301855e94309..3aba7b356320 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -16,6 +16,8 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
 		cache->nr_cached++;
 		wq_stack_add_head(&entry->node, &cache->list);
+		/* KASAN poisons object */
+		kasan_slab_free_mempool(entry);
 		return true;
 	}
 	return false;
@@ -27,6 +29,7 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
 		struct io_cache_entry *entry;
 
 		entry = container_of(cache->list.next, struct io_cache_entry, node);
+		kasan_unpoison_range(entry, cache->elem_size);
 		cache->list.next = cache->list.next->next;
 		return entry;
 	}
@@ -34,10 +37,11 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
 	return NULL;
 }
 
-static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
+static inline void io_alloc_cache_init(struct io_alloc_cache *cache, size_t size)
 {
 	cache->list.next = NULL;
 	cache->nr_cached = 0;
+	cache->elem_size = size;
 }
 
 static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 80b6204769e8..7a30a3e72fcc 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -309,8 +309,8 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->sqd_list);
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
 	INIT_LIST_HEAD(&ctx->io_buffers_cache);
-	io_alloc_cache_init(&ctx->apoll_cache);
-	io_alloc_cache_init(&ctx->netmsg_cache);
+	io_alloc_cache_init(&ctx->apoll_cache, sizeof(struct async_poll));
+	io_alloc_cache_init(&ctx->netmsg_cache, sizeof(struct io_async_msghdr));
 	init_completion(&ctx->ref_comp);
 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
 	mutex_init(&ctx->uring_lock);
diff --git a/io_uring/net.h b/io_uring/net.h
index 5ffa11bf5d2e..191009979bcb 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -5,8 +5,8 @@ 
 
 #include "alloc_cache.h"
 
-#if defined(CONFIG_NET)
 struct io_async_msghdr {
+#if defined(CONFIG_NET)
 	union {
 		struct iovec		fast_iov[UIO_FASTIOV];
 		struct {
@@ -22,8 +22,11 @@ struct io_async_msghdr {
 	struct sockaddr __user		*uaddr;
 	struct msghdr			msg;
 	struct sockaddr_storage		addr;
+#endif
 };
 
+#if defined(CONFIG_NET)
+
 struct io_async_connect {
 	struct sockaddr_storage		address;
 };