ftrace: Add ftrace_page to list after the index is calculated

Message ID: 20230308070844.58180-1-chenzhongjin@huawei.com
State: New
Series: ftrace: Add ftrace_page to list after the index is calculated

Commit Message

Chen Zhongjin March 8, 2023, 7:08 a.m. UTC
  KASAN reported the following problem:

 BUG: KASAN: use-after-free in lookup_rec
 Read of size 8 at addr ffff000199270ff0 by task modprobe

 CPU: 2 Comm: modprobe
 Hardware name: QEMU KVM Virtual Machine
 Call trace:
  kasan_report
  __asan_load8
  lookup_rec
  ftrace_location
  arch_check_ftrace_location
  check_kprobe_address_safe
  register_kprobe.part.0
  register_kprobe

This happened when lookup_rec() accessed pg->records[pg->index - 1].
The accessed position is not a valid record address; it sits at offset
-16 from the last allocated records page.

In ftrace_process_locs(), the ftrace_page is added to the ftrace_pages_start
list first, and its pg->index is calculated afterwards. Until the first
pg->index++, pg->index == 0.
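
For illustration, the ordering before this patch is roughly as follows
(a simplified sketch, module case shown; the loop body is abridged):

	ftrace_pages->next = start_pg;	/* page is reachable from the list here... */
	p = start;
	pg = start_pg;
	while (p < end) {
		...
		pg->records[pg->index++].ip = addr;	/* ...but pg->index stays 0 until here */
	}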

lookup_rec() iterates the whole list as long as it hasn't found a matching
record. When it reaches a page with pg->index == 0, reading
pg->records[-1].ip causes this problem.
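
The relevant check (abridged from lookup_rec() in kernel/trace/ftrace.c)
looks like this:

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;	/* with pg->index == 0 this reads records[-1].ip */
		rec = bsearch(&key, pg->records, pg->index, ...);
	}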

Fix this by adding the ftrace_page to the ftrace_pages_start list only
after pg->index has been calculated.

Fixes: 3208230983a0 ("ftrace: Remove usage of "freed" records")
Signed-off-by: Chen Zhongjin <chenzhongjin@huawei.com>
---
 kernel/trace/ftrace.c | 35 ++++++++++++++++++-----------------
 1 file changed, 18 insertions(+), 17 deletions(-)
  

Comments

Steven Rostedt March 8, 2023, 2:53 p.m. UTC | #1
On Wed, 8 Mar 2023 15:08:44 +0800
Chen Zhongjin <chenzhongjin@huawei.com> wrote:

> Fixes: 3208230983a0 ("ftrace: Remove usage of "freed" records")
> Signed-off-by: Chen Zhongjin <chenzhongjin@huawei.com>
> ---
>  kernel/trace/ftrace.c | 35 ++++++++++++++++++-----------------
>  1 file changed, 18 insertions(+), 17 deletions(-)
> 
> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> index 29baa97d0d53..a258c48ad91e 100644
> --- a/kernel/trace/ftrace.c
> +++ b/kernel/trace/ftrace.c
> @@ -6804,28 +6804,14 @@ static int ftrace_process_locs(struct module *mod,
>  
>  	mutex_lock(&ftrace_lock);
>  
> +	if (WARN_ON(mod && !ftrace_pages))

This is wrong. I have no issues with mod && !ftrace_pages. That's not the
same as !mod && ftrace_pages, which I want to warn on.

The mod && !ftrace_pages means that the system had no ftrace functions but
a module does. Strange, but not something to warn about.

The !mod && ftrace_pages case means that this is setting up the builtin ftrace
pages, but ftrace_pages already exists. As the builtin setup needs to be done
only once, this is a bug.
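
(For reference, the distinction in terms of the original branches, roughly;
an illustration of the two cases, not a drop-in replacement:)

	if (mod) {
		/* system has no ftrace functions but a module does: strange, not a bug */
		if (!ftrace_pages)
			goto out;
	} else {
		/* builtin pages must only be set up once */
		WARN_ON(ftrace_pages || ftrace_pages_start);
	}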

> +		goto out;
> +
>  	/*
>  	 * Core and each module needs their own pages, as
>  	 * modules will free them when they are removed.
>  	 * Force a new page to be allocated for modules.
>  	 */
> -	if (!mod) {
> -		WARN_ON(ftrace_pages || ftrace_pages_start);
> -		/* First initialization */
> -		ftrace_pages = ftrace_pages_start = start_pg;

Since the above only happens at boot up, and before anything can call into
it, this is not a problem.

> -	} else {
> -		if (!ftrace_pages)
> -			goto out;
> -
> -		if (WARN_ON(ftrace_pages->next)) {
> -			/* Hmm, we have free pages? */
> -			while (ftrace_pages->next)
> -				ftrace_pages = ftrace_pages->next;
> -		}
> -
> -		ftrace_pages->next = start_pg;

Basically, what you are saying is that once we add ftrace_pages->next to
point to the new start_pg, it becomes visible to others and that could be a
problem. And moving this code around is not really going to solve that, as
then we would need to add memory barriers.
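
(That is, a publish-style ordering would be needed, roughly along these
lines; a sketch of the concern only, not proposed code:)

	/* writer: ftrace_process_locs(), if the list insertion were moved last */
	pg->index = n;			/* fill the records first */
	smp_wmb();			/* order the fills before the publish */
	ftrace_pages->next = start_pg;	/* page becomes visible to readers */

	/* the reader side (lookup_rec()) would need matching acquire/dependency
	   ordering as well */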

> -	}
> -
>  	p = start;
>  	pg = start_pg;
>  	while (p < end) {
> @@ -6855,6 +6841,21 @@ static int ftrace_process_locs(struct module *mod,
>  	/* We should have used all pages */
>  	WARN_ON(pg->next);
>  
> +	/* Add pages to ftrace_pages list */
> +	if (!mod) {
> +		WARN_ON(ftrace_pages || ftrace_pages_start);
> +		/* First initialization */
> +		ftrace_pages_start = start_pg;
> +	} else {
> +		if (WARN_ON(ftrace_pages->next)) {
> +			/* Hmm, we have free pages? */
> +			while (ftrace_pages->next)
> +				ftrace_pages = ftrace_pages->next;
> +		}
> +
> +		ftrace_pages->next = start_pg;
> +	}
> +
>  	/* Assign the last page to ftrace_pages */
>  	ftrace_pages = pg;
>  

Why not just test for it?

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 29baa97d0d53..9b2803c7a18f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1564,7 +1564,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
 	key.flags = end;	/* overload flags, as it is unsigned long */
 
 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
-		if (end < pg->records[0].ip ||
+		if (pg->index == 0 ||
+		    end < pg->records[0].ip ||
 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
 			continue;
 		rec = bsearch(&key, pg->records, pg->index,


-- Steve
  
Chen Zhongjin March 9, 2023, 6:47 a.m. UTC | #2
On 2023/3/8 22:53, Steven Rostedt wrote:
>
>> -	} else {
>> -		if (!ftrace_pages)
>> -			goto out;
>> -
>> -		if (WARN_ON(ftrace_pages->next)) {
>> -			/* Hmm, we have free pages? */
>> -			while (ftrace_pages->next)
>> -				ftrace_pages = ftrace_pages->next;
>> -		}
>> -
>> -		ftrace_pages->next = start_pg;
> Basically, what you are saying is that once we add ftrace_pages->next to
> point to the new start_pg, it becomes visible to others and that could be a
> problem. And moving this code around is not really going to solve that, as
> then we would need to add memory barriers.
>
>> -	}
>> -
>>   	p = start;
>>   	pg = start_pg;
>>   	while (p < end) {
>> @@ -6855,6 +6841,21 @@ static int ftrace_process_locs(struct module *mod,
>>   	/* We should have used all pages */
>>   	WARN_ON(pg->next);
>>   
>> +	/* Add pages to ftrace_pages list */
>> +	if (!mod) {
>> +		WARN_ON(ftrace_pages || ftrace_pages_start);
>> +		/* First initialization */
>> +		ftrace_pages_start = start_pg;
>> +	} else {
>> +		if (WARN_ON(ftrace_pages->next)) {
>> +			/* Hmm, we have free pages? */
>> +			while (ftrace_pages->next)
>> +				ftrace_pages = ftrace_pages->next;
>> +		}
>> +
>> +		ftrace_pages->next = start_pg;
>> +	}
>> +
>>   	/* Assign the last page to ftrace_pages */
>>   	ftrace_pages = pg;
>>   
> Why not just test for it?
>
> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> index 29baa97d0d53..9b2803c7a18f 100644
> --- a/kernel/trace/ftrace.c
> +++ b/kernel/trace/ftrace.c
> @@ -1564,7 +1564,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
>   	key.flags = end;	/* overload flags, as it is unsigned long */
>   
>   	for (pg = ftrace_pages_start; pg; pg = pg->next) {
> -		if (end < pg->records[0].ip ||
> +		if (pg->index == 0 ||
> +		    end < pg->records[0].ip ||
>   		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
>   			continue;
>   		rec = bsearch(&key, pg->records, pg->index,
>
>
> -- Steve
Thanks for the review!

At first I was worried that it would cause lookup_rec() to return an
incorrect result if it is searching for an address in a module that is
still being added. But now I found that the records are added at
MODULE_STATE_UNFORMED, so the module's addresses won't be searched at that
point in any case.

Let's do it this way. I'll send another patch.
  

Patch

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 29baa97d0d53..a258c48ad91e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -6804,28 +6804,14 @@  static int ftrace_process_locs(struct module *mod,
 
 	mutex_lock(&ftrace_lock);
 
+	if (WARN_ON(mod && !ftrace_pages))
+		goto out;
+
 	/*
 	 * Core and each module needs their own pages, as
 	 * modules will free them when they are removed.
 	 * Force a new page to be allocated for modules.
 	 */
-	if (!mod) {
-		WARN_ON(ftrace_pages || ftrace_pages_start);
-		/* First initialization */
-		ftrace_pages = ftrace_pages_start = start_pg;
-	} else {
-		if (!ftrace_pages)
-			goto out;
-
-		if (WARN_ON(ftrace_pages->next)) {
-			/* Hmm, we have free pages? */
-			while (ftrace_pages->next)
-				ftrace_pages = ftrace_pages->next;
-		}
-
-		ftrace_pages->next = start_pg;
-	}
-
 	p = start;
 	pg = start_pg;
 	while (p < end) {
@@ -6855,6 +6841,21 @@  static int ftrace_process_locs(struct module *mod,
 	/* We should have used all pages */
 	WARN_ON(pg->next);
 
+	/* Add pages to ftrace_pages list */
+	if (!mod) {
+		WARN_ON(ftrace_pages || ftrace_pages_start);
+		/* First initialization */
+		ftrace_pages_start = start_pg;
+	} else {
+		if (WARN_ON(ftrace_pages->next)) {
+			/* Hmm, we have free pages? */
+			while (ftrace_pages->next)
+				ftrace_pages = ftrace_pages->next;
+		}
+
+		ftrace_pages->next = start_pg;
+	}
+
 	/* Assign the last page to ftrace_pages */
 	ftrace_pages = pg;