[v2] mm: Make ksize() a reporting-only function

Message ID 20221118035656.gonna.698-kees@kernel.org
State New
Series [v2] mm: Make ksize() a reporting-only function

Commit Message

Kees Cook Nov. 18, 2022, 3:56 a.m. UTC
With all "silently resizing" callers of ksize() refactored, remove the
logic in ksize() that would allow it to be used to effectively change
the size of an allocation (bypassing __alloc_size hints, etc). Users
wanting this feature need to either use kmalloc_size_roundup() before an
allocation, or use krealloc() directly.
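
As an illustrative sketch (not part of this patch; "struct entry", "count",
"want", and "full" are hypothetical names), a caller that previously relied
on ksize() to claim the slack space would now round up before allocating:

  struct entry *entries;			/* hypothetical caller */
  size_t want = count * sizeof(*entries);
  size_t full = kmalloc_size_roundup(want);

  entries = kmalloc(full, GFP_KERNEL);
  if (!entries)
          return -ENOMEM;
  /* All 'full' bytes are legitimately usable; ksize() only reports. */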

For kfree_sensitive(), move the unpoisoning logic inline. Replace some of
the partially open-coded ksize() logic in __do_krealloc() with ksize() now
that it doesn't perform unpoisoning.

Adjust the KUnit tests to match the new ksize() behavior.

Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: linux-mm@kvack.org
Cc: kasan-dev@googlegroups.com
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Kees Cook <keescook@chromium.org>
---
v2:
- improve kunit test precision (andreyknvl)
- add Ack (vbabka)
v1: https://lore.kernel.org/all/20221022180455.never.023-kees@kernel.org
---
 mm/kasan/kasan_test.c | 14 +++++++++-----
 mm/slab_common.c      | 26 ++++++++++----------------
 2 files changed, 19 insertions(+), 21 deletions(-)
  

Comments

Vlastimil Babka Nov. 18, 2022, 10:32 a.m. UTC | #1
On 11/18/22 04:56, Kees Cook wrote:
> With all "silently resizing" callers of ksize() refactored, remove the

At a cursory look it seems that's true now in -next (but not mainline?),
can you confirm?
That would probably be safe enough to have slab.git expose this to -next now
and time a PR appropriately in the next merge window?

  
Kees Cook Nov. 18, 2022, 5:11 p.m. UTC | #2
On Fri, Nov 18, 2022 at 11:32:36AM +0100, Vlastimil Babka wrote:
> On 11/18/22 04:56, Kees Cook wrote:
> > With all "silently resizing" callers of ksize() refactored, remove the
> 
> At a cursory look it seems that's true now in -next (but not mainline?),
> can you confirm?

Almost, yes. I realized there is 1 case in the BPF verifier that
remains. (I thought it was picked up, but only a prereq patch was.) I'm
going to resend that one today, but I would expect it to be picked
up soon. (But, yes, definitely not for mainline.)

> That would probably be safe enough to have slab.git expose this to -next now
> and time a PR appropriately in the next merge window?

Possibly. I suspect syzkaller might trip KASAN on any larger BPF tests
until I get the last one landed. And if you don't want to do the timing
of the PR, I can carry this patch in my hardening tree, since I already
have to do a two-part early/late-merge-window PR there.
  
Vlastimil Babka Nov. 20, 2022, 4:50 p.m. UTC | #3
On 11/18/22 18:11, Kees Cook wrote:
> On Fri, Nov 18, 2022 at 11:32:36AM +0100, Vlastimil Babka wrote:
>> On 11/18/22 04:56, Kees Cook wrote:
>> > With all "silently resizing" callers of ksize() refactored, remove the
>> 
>> At a cursory look it seems that's true now in -next (but not mainline?),
>> can you confirm?
> 
> Almost, yes. I realized there is 1 case in the BPF verifier that
> remains. (I thought it was picked up, but only a prereq patch was.) I'm
> going to resend that one today, but I would expect it to be picked
> up soon. (But, yes, definitely not for mainline.)
> 
>> That would probably be safe enough to have slab.git expose this to -next now
>> and time a PR appropriately in the next merge window?
> 
> Possibly. I suspect syzkaller might trip KASAN on any larger BPF tests
> until I get the last one landed. And if you don't want to do the timing
> of the PR, I can carry this patch in my hardening tree, since I already
> have to do a two-part early/late-merge-window PR there.

OK, I'm fine with you doing that; there's my ack already. Hopefully Andrey
is now also happy :)

Vlastimil
  
David Rientjes Nov. 23, 2022, 1:30 a.m. UTC | #4
On Thu, 17 Nov 2022, Kees Cook wrote:

> With all "silently resizing" callers of ksize() refactored, remove the
> logic in ksize() that would allow it to be used to effectively change
> the size of an allocation (bypassing __alloc_size hints, etc). Users
> wanting this feature need to either use kmalloc_size_roundup() before an
> allocation, or use krealloc() directly.

Acked-by: David Rientjes <rientjes@google.com>
  
Andrey Konovalov Nov. 26, 2022, 5:04 p.m. UTC | #5
On Fri, Nov 18, 2022 at 4:57 AM Kees Cook <keescook@chromium.org> wrote:
>
> diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
> index 7502f03c807c..fc4b22916587 100644
> --- a/mm/kasan/kasan_test.c
> +++ b/mm/kasan/kasan_test.c
> @@ -821,7 +821,7 @@ static void kasan_global_oob_left(struct kunit *test)
>         KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
>  }
>
> -/* Check that ksize() makes the whole object accessible. */
> +/* Check that ksize() does NOT unpoison whole object. */
>  static void ksize_unpoisons_memory(struct kunit *test)
>  {
>         char *ptr;
> @@ -829,15 +829,19 @@ static void ksize_unpoisons_memory(struct kunit *test)
>
>         ptr = kmalloc(size, GFP_KERNEL);
>         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> +
>         real_size = ksize(ptr);
> +       KUNIT_EXPECT_GT(test, real_size, size);
>
>         OPTIMIZER_HIDE_VAR(ptr);
>
> -       /* This access shouldn't trigger a KASAN report. */
> -       ptr[size] = 'x';
> +       /* These accesses shouldn't trigger a KASAN report. */
> +       ptr[0] = 'x';
> +       ptr[size - 1] = 'x';
>
> -       /* This one must. */
> -       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
> +       /* These must trigger a KASAN report. */
> +       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
> +       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);

Hi Kees,

I just realized there's an issue here with the tag-based modes, as
they align the unpoisoned area to 16 bytes.

One solution would be to change the allocation size to 128 -
KASAN_GRANULE_SIZE - 5, the same way kmalloc_oob_right test does it,
so that the last 16-byte granule won't get unpoisoned for the
tag-based modes. And then check that the ptr[size] access fails only
for the Generic mode.

Thanks!

  
Kees Cook Nov. 27, 2022, 12:55 a.m. UTC | #6
On Sat, Nov 26, 2022 at 06:04:39PM +0100, Andrey Konovalov wrote:
> 
> Hi Kees,
> 
> I just realized there's an issue here with the tag-based modes, as
> they align the unpoisoned area to 16 bytes.
> 
> One solution would be to change the allocation size to 128 -
> KASAN_GRANULE_SIZE - 5, the same way kmalloc_oob_right test does it,
> so that the last 16-byte granule won't get unpoisoned for the
> tag-based modes. And then check that the ptr[size] access fails only
> for the Generic mode.

Ah! Good point. Are you able to send a patch? I suspect you know exactly
what to change; it might take me a bit longer to double-check all of
those details.

-Kees
  
Andrey Konovalov Nov. 30, 2022, 2:11 p.m. UTC | #7
On Sun, Nov 27, 2022 at 1:55 AM Kees Cook <keescook@chromium.org> wrote:
>
> > I just realized there's an issue here with the tag-based modes, as
> > they align the unpoisoned area to 16 bytes.
> >
> > One solution would be to change the allocation size to 128 -
> > KASAN_GRANULE_SIZE - 5, the same way kmalloc_oob_right test does it,
> > so that the last 16-byte granule won't get unpoisoned for the
> > tag-based modes. And then check that the ptr[size] access fails only
> > for the Generic mode.
>
> Ah! Good point. Are you able to send a patch? I suspect you know exactly
> what to change; it might take me a bit longer to double-check all of
> those details.

Let's do it like this:

size_t size = 128 - KASAN_GRANULE_SIZE - 5, real_size;

...

/* These must trigger a KASAN report. */
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);

Thanks!
  
Kees Cook Dec. 1, 2022, 4:51 p.m. UTC | #8
On Wed, Nov 30, 2022 at 03:11:35PM +0100, Andrey Konovalov wrote:
> On Sun, Nov 27, 2022 at 1:55 AM Kees Cook <keescook@chromium.org> wrote:
> >
> > > I just realized there's an issue here with the tag-based modes, as
> > > they align the unpoisoned area to 16 bytes.
> > >
> > > One solution would be to change the allocation size to 128 -
> > > KASAN_GRANULE_SIZE - 5, the same way kmalloc_oob_right test does it,
> > > so that the last 16-byte granule won't get unpoisoned for the
> > > tag-based modes. And then check that the ptr[size] access fails only
> > > for the Generic mode.
> >
> > Ah! Good point. Are you able to send a patch? I suspect you know exactly
> > what to change; it might take me a bit longer to double-check all of
> > those details.
> 
> Let's do it like this:
> 
> size_t size = 128 - KASAN_GRANULE_SIZE - 5, real_size;
> 
> ...
> 
> /* These must trigger a KASAN report. */
> if (IS_ENABLED(CONFIG_KASAN_GENERIC))
>     KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
> KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
> KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);

Done, and tested! Thanks :)
  

Patch

diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 7502f03c807c..fc4b22916587 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -821,7 +821,7 @@ static void kasan_global_oob_left(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
 }
 
-/* Check that ksize() makes the whole object accessible. */
+/* Check that ksize() does NOT unpoison the whole object. */
 static void ksize_unpoisons_memory(struct kunit *test)
 {
 	char *ptr;
@@ -829,15 +829,19 @@ static void ksize_unpoisons_memory(struct kunit *test)
 
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
 	real_size = ksize(ptr);
+	KUNIT_EXPECT_GT(test, real_size, size);
 
 	OPTIMIZER_HIDE_VAR(ptr);
 
-	/* This access shouldn't trigger a KASAN report. */
-	ptr[size] = 'x';
+	/* These accesses shouldn't trigger a KASAN report. */
+	ptr[0] = 'x';
+	ptr[size - 1] = 'x';
 
-	/* This one must. */
-	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
+	/* These must trigger a KASAN report. */
+	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
+	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
 
 	kfree(ptr);
 }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8276022f0da4..27caa57af070 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1335,11 +1335,11 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
 	void *ret;
 	size_t ks;
 
-	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
+	/* Check for double-free before calling ksize. */
 	if (likely(!ZERO_OR_NULL_PTR(p))) {
 		if (!kasan_check_byte(p))
 			return NULL;
-		ks = kfence_ksize(p) ?: __ksize(p);
+		ks = ksize(p);
 	} else
 		ks = 0;
 
@@ -1407,21 +1407,21 @@ void kfree_sensitive(const void *p)
 	void *mem = (void *)p;
 
 	ks = ksize(mem);
-	if (ks)
+	if (ks) {
+		kasan_unpoison_range(mem, ks);
 		memzero_explicit(mem, ks);
+	}
 	kfree(mem);
 }
 EXPORT_SYMBOL(kfree_sensitive);
 
 size_t ksize(const void *objp)
 {
-	size_t size;
-
 	/*
-	 * We need to first check that the pointer to the object is valid, and
-	 * only then unpoison the memory. The report printed from ksize() is
-	 * more useful, then when it's printed later when the behaviour could
-	 * be undefined due to a potential use-after-free or double-free.
+	 * We need to first check that the pointer to the object is valid.
+	 * The KASAN report printed from ksize() is more useful than when
+	 * it's printed later, when the behaviour could be undefined due to
+	 * a potential use-after-free or double-free.
 	 *
 	 * We use kasan_check_byte(), which is supported for the hardware
 	 * tag-based KASAN mode, unlike kasan_check_read/write().
@@ -1435,13 +1435,7 @@ size_t ksize(const void *objp)
 	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
 		return 0;
 
-	size = kfence_ksize(objp) ?: __ksize(objp);
-	/*
-	 * We assume that ksize callers could use whole allocated area,
-	 * so we need to unpoison this area.
-	 */
-	kasan_unpoison_range(objp, size);
-	return size;
+	return kfence_ksize(objp) ?: __ksize(objp);
 }
 EXPORT_SYMBOL(ksize);
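
For reference, here is a sketch of ksize_unpoisons_memory() with Andrey's
follow-up suggestion folded in, assembled from the snippets above (the form
that finally lands may differ). With the tag-based modes' 16-byte granules,
size is 107 and the unpoisoned area is rounded up to offset 112, so
ptr[size] faults only under generic KASAN, while ptr[size + 5] and
ptr[real_size - 1] fault in all modes:

static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	real_size = ksize(ptr);
	KUNIT_EXPECT_GT(test, real_size, size);

	OPTIMIZER_HIDE_VAR(ptr);

	/* These accesses shouldn't trigger a KASAN report. */
	ptr[0] = 'x';
	ptr[size - 1] = 'x';

	/* These must trigger a KASAN report. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);

	kfree(ptr);
}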