From: Andrey Konovalov <andreyknvl@google.com>
Reorganize the code and reword the comment in
__kasan_mempool_poison_object to improve code readability.
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
mm/kasan/common.c | 19 +++++++------------
1 file changed, 7 insertions(+), 12 deletions(-)
@@ -447,27 +447,22 @@ void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
 
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 {
-	struct folio *folio;
-
-	folio = virt_to_folio(ptr);
+	struct folio *folio = virt_to_folio(ptr);
+	struct slab *slab;
 
 	/*
-	 * Even though this function is only called for kmem_cache_alloc and
-	 * kmalloc backed mempool allocations, those allocations can still be
-	 * !PageSlab() when the size provided to kmalloc is larger than
-	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+	 * This function can be called for large kmalloc allocations that get
+	 * their memory from page_alloc. Thus, the folio might not be a slab.
 	 */
 	if (unlikely(!folio_test_slab(folio))) {
 		if (check_page_allocation(ptr, ip))
 			return false;
 		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
 		return true;
-	} else {
-		struct slab *slab = folio_slab(folio);
-
-		return !____kasan_slab_free(slab->slab_cache, ptr, ip,
-					    false, false);
 	}
+
+	slab = folio_slab(folio);
+	return !____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
 }
 
 void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
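
For readers who would rather see the end state than the hunk, below is a
reconstruction of how __kasan_mempool_poison_object should read with this
patch applied. It is assembled from the diff above rather than copied from
the tree, so treat the exact whitespace and surrounding context as
approximate; all identifiers are taken from the hunk itself.

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct folio *folio = virt_to_folio(ptr);
	struct slab *slab;

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory from page_alloc. Thus, the folio might not be a slab.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (check_page_allocation(ptr, ip))
			return false;
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
		return true;
	}

	slab = folio_slab(folio);
	return !____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
}

Replacing the else branch with an early return keeps the slab case at the
top indentation level, drops the nested slab declaration, and lets the
____kasan_slab_free() call fit on one line, which is the readability gain
the changelog refers to.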