x86/kasan: map shadow for percpu pages on demand

Message ID 20221027213105.4905-1-ryabinin.a.a@gmail.com
State New
Series x86/kasan: map shadow for percpu pages on demand

Commit Message

Andrey Ryabinin Oct. 27, 2022, 9:31 p.m. UTC
  KASAN maps shadow for the entire CPU-entry-area:
  [CPU_ENTRY_AREA_BASE, CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE]

This explodes after commit 1248fb6a8201 ("x86/mm: Randomize per-cpu entry area"),
since it increases CPU_ENTRY_AREA_MAP_SIZE to 512 GB and KASAN fails
to allocate shadow for such a big area.

Fix this by allocating KASAN shadow only for the actually used CPU entry area
addresses mapped by cea_map_percpu_pages().

Fixes: 1248fb6a8201 ("x86/mm: Randomize per-cpu entry area")
Reported-by: kernel test robot <yujie.liu@intel.com>
Link: https://lore.kernel.org/r/202210241508.2e203c3d-yujie.liu@intel.com
Tested-by: Yujie Liu <yujie.liu@intel.com>
Signed-off-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
---
 arch/x86/include/asm/kasan.h |  3 +++
 arch/x86/mm/cpu_entry_area.c |  6 ++++++
 arch/x86/mm/kasan_init_64.c  | 15 ++++++++++++---
 3 files changed, 21 insertions(+), 3 deletions(-)
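
For context: generic KASAN keeps one shadow byte for every 8 bytes of address
space, so the shadow for a region is 1/8 of its size, and mapping shadow for
the whole 512 GB CPU-entry-area up front would mean populating 64 GB of shadow
memory. A minimal user-space sketch of that arithmetic, assuming the usual
KASAN_SHADOW_SCALE_SHIFT of 3 (the constants here are illustrative, not taken
from the kernel headers):

	#include <stdio.h>

	/* Generic KASAN: one shadow byte covers 8 bytes of address space. */
	#define KASAN_SHADOW_SCALE_SHIFT 3

	int main(void)
	{
		/* CPU_ENTRY_AREA_MAP_SIZE after the randomization commit. */
		unsigned long long map_size = 512ULL << 30;	/* 512 GB */
		/* Shadow the old "map everything up front" approach needs. */
		unsigned long long shadow = map_size >> KASAN_SHADOW_SCALE_SHIFT;

		printf("shadow for 512 GB of CEA: %llu GB\n", shadow >> 30); /* 64 */
		return 0;
	}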
  

Comments

Yin Fengwei Oct. 28, 2022, 2:51 a.m. UTC | #1
Hi Andrey,

On 10/28/2022 5:31 AM, Andrey Ryabinin wrote:
> KASAN maps shadow for the entire CPU-entry-area:
>   [CPU_ENTRY_AREA_BASE, CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE]
> 
> This explodes after commit 1248fb6a8201 ("x86/mm: Randomize per-cpu entry area"),
> since it increases CPU_ENTRY_AREA_MAP_SIZE to 512 GB and KASAN fails
> to allocate shadow for such a big area.
> 
> Fix this by allocating KASAN shadow only for the actually used CPU entry area
> addresses mapped by cea_map_percpu_pages().
> 
> Fixes: 1248fb6a8201 ("x86/mm: Randomize per-cpu entry area")
> Reported-by: kernel test robot <yujie.liu@intel.com>
> Link: https://lore.kernel.org/r/202210241508.2e203c3d-yujie.liu@intel.com
> Tested-by: Yujie Liu <yujie.liu@intel.com>
> Signed-off-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
> ---
>  arch/x86/include/asm/kasan.h |  3 +++
>  arch/x86/mm/cpu_entry_area.c |  6 ++++++
>  arch/x86/mm/kasan_init_64.c  | 15 ++++++++++++---
>  3 files changed, 21 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
> index 13e70da38bed..de75306b932e 100644
> --- a/arch/x86/include/asm/kasan.h
> +++ b/arch/x86/include/asm/kasan.h
> @@ -28,9 +28,12 @@
>  #ifdef CONFIG_KASAN
>  void __init kasan_early_init(void);
>  void __init kasan_init(void);
> +void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid);
>  #else
>  static inline void kasan_early_init(void) { }
>  static inline void kasan_init(void) { }
> +static inline void kasan_populate_shadow_for_vaddr(void *va, size_t size,
> +						   int nid) { }
>  #endif
>  
>  #endif
> diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
> index ad1f750517a1..ac2e952186c3 100644
> --- a/arch/x86/mm/cpu_entry_area.c
> +++ b/arch/x86/mm/cpu_entry_area.c
> @@ -9,6 +9,7 @@
>  #include <asm/cpu_entry_area.h>
>  #include <asm/fixmap.h>
>  #include <asm/desc.h>
> +#include <asm/kasan.h>
>  
>  static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
>  
> @@ -91,8 +92,13 @@ void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
>  static void __init
>  cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
>  {
> +	phys_addr_t pa = per_cpu_ptr_to_phys(ptr);
> +
> +	kasan_populate_shadow_for_vaddr(cea_vaddr, pages * PAGE_SIZE,
> +					early_pfn_to_nid(PFN_DOWN(pa)));
> +
>  	for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
>  		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
>  }
>  
>  static void __init percpu_setup_debug_store(unsigned int cpu)
> diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
> index e7b9b464a82f..d1416926ad52 100644
> --- a/arch/x86/mm/kasan_init_64.c
> +++ b/arch/x86/mm/kasan_init_64.c
> @@ -316,6 +316,18 @@ void __init kasan_early_init(void)
>  	kasan_map_early_shadow(init_top_pgt);
>  }
>  
> +void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid)
> +{
> +	unsigned long shadow_start, shadow_end;
> +
> +	shadow_start = (unsigned long)kasan_mem_to_shadow(va);
> +	shadow_start = round_down(shadow_start, PAGE_SIZE);
> +	shadow_end = (unsigned long)kasan_mem_to_shadow(va + size);
> +	shadow_end = round_up(shadow_end, PAGE_SIZE);
> +
> +	kasan_populate_shadow(shadow_start, shadow_end, nid);
> +}
> +
>  void __init kasan_init(void)
>  {
>  	int i;
> @@ -393,9 +405,6 @@ void __init kasan_init(void)
>  		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
>  		shadow_cpu_entry_begin);
>  
> -	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
> -			      (unsigned long)shadow_cpu_entry_end, 0);
> -
There will be addresses in the range (shadow_cpu_entry_begin, shadow_cpu_entry_end)
which have no KASAN shadow mapping populated after this patch. Not sure whether
that could be a problem. Thanks.

Regards
Yin, Fengwei

>  	kasan_populate_early_shadow(shadow_cpu_entry_end,
>  			kasan_mem_to_shadow((void *)__START_KERNEL_map));
>
  
Andrey Ryabinin Oct. 28, 2022, 2:20 p.m. UTC | #2
On 10/28/22 05:51, Yin, Fengwei wrote:
> Hi Andrey,
> 

>>  void __init kasan_init(void)
>>  {
>>  	int i;
>> @@ -393,9 +405,6 @@ void __init kasan_init(void)
>>  		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
>>  		shadow_cpu_entry_begin);
>>  
>> -	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
>> -			      (unsigned long)shadow_cpu_entry_end, 0);
>> -
> There will be addresses in the range (shadow_cpu_entry_begin, shadow_cpu_entry_end)
> which have no KASAN shadow mapping populated after this patch. Not sure whether
> that could be a problem. Thanks.
> 


This shouldn't be a problem. It's valid to have shadow *only* for addresses with mapped memory.
A shadow address is accessed only if the address itself is accessed. So the difference between
not having shadow for an address with no mapping vs. having it is whether we crash on the access
to the KASAN shadow or crash a few instructions later on the access to the address itself.
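
To illustrate: generic KASAN dereferences shadow only as a by-product of the
instrumented access itself. A rough sketch of what the check for an aligned
8-byte load boils down to (check_load8() is a made-up name for illustration,
not the kernel's actual compiler-emitted instrumentation):

	static __always_inline void check_load8(const void *addr)
	{
		/* The shadow byte is touched only because @addr is touched. */
		s8 shadow = *(s8 *)kasan_mem_to_shadow(addr);

		/* For an aligned 8-byte access, any non-zero shadow is a bug. */
		if (unlikely(shadow))
			kasan_report((unsigned long)addr, 8, false, _RET_IP_);
	}

If an address is never dereferenced, its shadow is never dereferenced either,
so leaving shadow unpopulated for unmapped ranges only moves where the crash
happens.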
  
Yin Fengwei Oct. 31, 2022, 12:15 a.m. UTC | #3
On 10/28/2022 10:20 PM, Andrey Ryabinin wrote:
> 
> 
> On 10/28/22 05:51, Yin, Fengwei wrote:
>> Hi Andrey,
>>
> 
>>>  void __init kasan_init(void)
>>>  {
>>>  	int i;
>>> @@ -393,9 +405,6 @@ void __init kasan_init(void)
>>>  		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
>>>  		shadow_cpu_entry_begin);
>>>  
>>> -	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
>>> -			      (unsigned long)shadow_cpu_entry_end, 0);
>>> -
>> There will be addresses in the range (shadow_cpu_entry_begin, shadow_cpu_entry_end)
>> which have no KASAN shadow mapping populated after this patch. Not sure whether
>> that could be a problem. Thanks.
>>
> 
> 
> This shouldn't be a problem. It's valid to have shadow *only* for addresses with mapped memory.
> A shadow address is accessed only if the address itself is accessed. So the difference between
> not having shadow for an address with no mapping vs. having it is whether we crash on the access
> to the KASAN shadow or crash a few instructions later on the access to the address itself.

Thanks for clarification.

Regards
Yin, Fengwei
  

Patch

diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index 13e70da38bed..de75306b932e 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -28,9 +28,12 @@ 
 #ifdef CONFIG_KASAN
 void __init kasan_early_init(void);
 void __init kasan_init(void);
+void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid);
 #else
 static inline void kasan_early_init(void) { }
 static inline void kasan_init(void) { }
+static inline void kasan_populate_shadow_for_vaddr(void *va, size_t size,
+						   int nid) { }
 #endif
 
 #endif
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index ad1f750517a1..ac2e952186c3 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -9,6 +9,7 @@ 
 #include <asm/cpu_entry_area.h>
 #include <asm/fixmap.h>
 #include <asm/desc.h>
+#include <asm/kasan.h>
 
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
 
@@ -91,8 +92,13 @@ void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
 static void __init
 cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
 {
+	phys_addr_t pa = per_cpu_ptr_to_phys(ptr);
+
+	kasan_populate_shadow_for_vaddr(cea_vaddr, pages * PAGE_SIZE,
+					early_pfn_to_nid(PFN_DOWN(pa)));
+
 	for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
 		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
 }
 
 static void __init percpu_setup_debug_store(unsigned int cpu)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index e7b9b464a82f..d1416926ad52 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -316,6 +316,18 @@ void __init kasan_early_init(void)
 	kasan_map_early_shadow(init_top_pgt);
 }
 
+void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid)
+{
+	unsigned long shadow_start, shadow_end;
+
+	shadow_start = (unsigned long)kasan_mem_to_shadow(va);
+	shadow_start = round_down(shadow_start, PAGE_SIZE);
+	shadow_end = (unsigned long)kasan_mem_to_shadow(va + size);
+	shadow_end = round_up(shadow_end, PAGE_SIZE);
+
+	kasan_populate_shadow(shadow_start, shadow_end, nid);
+}
+
 void __init kasan_init(void)
 {
 	int i;
@@ -393,9 +405,6 @@ void __init kasan_init(void)
 		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
 		shadow_cpu_entry_begin);
 
-	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
-			      (unsigned long)shadow_cpu_entry_end, 0);
-
 	kasan_populate_early_shadow(shadow_cpu_entry_end,
 			kasan_mem_to_shadow((void *)__START_KERNEL_map));
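
A note on the rounding in kasan_populate_shadow_for_vaddr(): with 1/8 shadow
scaling, the shadow span for a small per-cpu mapping is smaller than a page,
so the start and end have to be rounded out to page boundaries before
kasan_populate_shadow() can back them with whole pages. A quick illustrative
calculation (user-space sketch; PAGE_SIZE and the scale shift are assumed
constants here, not read from kernel headers):

	#include <stdio.h>

	#define PAGE_SIZE			4096ULL
	#define KASAN_SHADOW_SCALE_SHIFT	3

	int main(void)
	{
		/* e.g. a 4-page (16 KB) per-cpu area mapped into the CEA */
		unsigned long long size = 4 * PAGE_SIZE;
		unsigned long long shadow = size >> KASAN_SHADOW_SCALE_SHIFT;

		/* 16 KB of mapping needs only 2 KB of shadow ... */
		printf("raw shadow span: %llu bytes\n", shadow);

		/* ... so round_down()/round_up() grow it to a full page. */
		unsigned long long populated =
			(shadow + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;
		printf("populated shadow: %llu bytes\n", populated);
		return 0;
	}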