[mm,08/21] kasan: clean up __kasan_mempool_poison_object

Message ID 4f6fc8840512286c1a96e16e86901082c671677d.1703024586.git.andreyknvl@google.com
State New
Series kasan: save mempool stack traces

Commit Message

andrey.konovalov@linux.dev Dec. 19, 2023, 10:28 p.m. UTC
  From: Andrey Konovalov <andreyknvl@google.com>

Reorganize the code and reword the comment in
__kasan_mempool_poison_object to improve code readability.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 mm/kasan/common.c | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)
  

Patch

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 7ebc001d0fcd..3f4a1ed69e03 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -457,27 +457,22 @@ void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
 
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 {
-	struct folio *folio;
-
-	folio = virt_to_folio(ptr);
+	struct folio *folio = virt_to_folio(ptr);
+	struct slab *slab;
 
 	/*
-	 * Even though this function is only called for kmem_cache_alloc and
-	 * kmalloc backed mempool allocations, those allocations can still be
-	 * !PageSlab() when the size provided to kmalloc is larger than
-	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+	 * This function can be called for large kmalloc allocations that get
+	 * their memory from page_alloc. Thus, the folio might not be a slab.
 	 */
 	if (unlikely(!folio_test_slab(folio))) {
 		if (check_page_allocation(ptr, ip))
 			return false;
 		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
 		return true;
-	} else {
-		struct slab *slab = folio_slab(folio);
-
-		return !____kasan_slab_free(slab->slab_cache, ptr, ip,
-						false, false);
 	}
+
+	slab = folio_slab(folio);
+	return !____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
 }
 
 void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
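
For context, here is a minimal, self-contained sketch (not part of the patch) of
a setup that exercises the !folio_test_slab() branch above: a kmalloc-backed
mempool whose element size exceeds KMALLOC_MAX_CACHE_SIZE gets its elements
straight from page_alloc. The module scaffolding, the function name, and the
element size are illustrative assumptions; the mempool calls are the regular
<linux/mempool.h> API.

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/module.h>

static mempool_t *pool;

static int __init large_kmalloc_pool_example(void)
{
	/*
	 * 8 * PAGE_SIZE is past KMALLOC_MAX_CACHE_SIZE on common SLUB
	 * configs, so kmalloc() gets these elements from page_alloc and
	 * their folios are not slabs.
	 */
	pool = mempool_create_kmalloc_pool(2, 8 * PAGE_SIZE);
	if (!pool)
		return -ENOMEM;

	/*
	 * Elements parked in the pool's reserve (filled at creation and
	 * refilled by mempool_free() while the pool is below min_nr) are
	 * poisoned through kasan_mempool_poison_object(), which for these
	 * elements takes the !folio_test_slab() branch above.
	 */
	mempool_destroy(pool);
	return 0;
}

module_init(large_kmalloc_pool_example);
MODULE_LICENSE("GPL");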