[05/11] x86/sev: Perform PVALIDATE using the SVSM when not at VMPL0

Message ID d89b9ebb592a2436a1969304f826ddf3866682bc.1706307364.git.thomas.lendacky@amd.com
State New
Series: Provide SEV-SNP support for running under an SVSM

Commit Message

Tom Lendacky Jan. 26, 2024, 10:15 p.m. UTC
  The PVALIDATE instruction can only be performed at VMPL0. An SVSM will
be present when running at VMPL1 or a lower privilege level.

When an SVSM is present, use the SVSM_CORE_PVALIDATE call to perform
memory validation instead of issuing the PVALIDATE instruction directly.

The validation of a single 4K page is now explicitly identified as such
in the function name, pvalidate_4k_page(). The pvalidate_pages() function
is used for validating 1 or more pages at either 4K or 2M in size. Each
function, however, determines whether it can issue the PVALIDATE directly
or whether the SVSM needs to be invoked.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 arch/x86/boot/compressed/sev.c |  42 +++++++-
 arch/x86/include/asm/sev.h     |  22 +++++
 arch/x86/kernel/sev-shared.c   | 176 ++++++++++++++++++++++++++++++++-
 arch/x86/kernel/sev.c          |  25 +++--
 4 files changed, 247 insertions(+), 18 deletions(-)
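
In outline, the dispatch described above looks like the sketch below (a
condensed paraphrase of the diff that follows, not a literal excerpt; the
actual patch routes the non-SVSM case through a base_pvalidate_4k_page()
helper rather than calling pvalidate() inline):

static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, bool validate)
{
	int ret;

	/* A non-zero VMPL means an SVSM owns VMPL0, so PVALIDATE must be proxied. */
	ret = vmpl ? svsm_pvalidate_4k_page(paddr, validate)
		   : pvalidate(vaddr, RMP_PG_SIZE_4K, validate);

	if (ret) {
		WARN(1, "Failed to validate address 0x%lx ret %d", vaddr, ret);
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
	}
}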
  

Comments

Dionna Amalie Glaze Jan. 27, 2024, 12:59 a.m. UTC | #1
On Fri, Jan 26, 2024 at 2:18 PM Tom Lendacky <thomas.lendacky@amd.com> wrote:
>
> The PVALIDATE instruction can only be performed at VMPL0. An SVSM will
> be present when running at VMPL1 or a lower privilege level.
>
> When an SVSM is present, use the SVSM_CORE_PVALIDATE call to perform
> memory validation instead of issuing the PVALIDATE instruction directly.
>
> The validation of a single 4K page is now explicitly identified as such
> in the function name, pvalidate_4k_page(). The pvalidate_pages() function
> is used for validating 1 or more pages at either 4K or 2M in size. Each
> function, however, determines whether it can issue the PVALIDATE directly
> or whether the SVSM needs to be invoked.
>
> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
> ---
>  arch/x86/boot/compressed/sev.c |  42 +++++++-
>  arch/x86/include/asm/sev.h     |  22 +++++
>  arch/x86/kernel/sev-shared.c   | 176 ++++++++++++++++++++++++++++++++-
>  arch/x86/kernel/sev.c          |  25 +++--
>  4 files changed, 247 insertions(+), 18 deletions(-)
>
> diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
> index 5d2403914ceb..3fbb614c31e0 100644
> --- a/arch/x86/boot/compressed/sev.c
> +++ b/arch/x86/boot/compressed/sev.c
> @@ -38,6 +38,16 @@ static u8 vmpl __section(".data");
>  static u64 boot_svsm_caa_pa __section(".data");
>  static struct svsm_ca *boot_svsm_caa __section(".data");
>
> +static struct svsm_ca *__svsm_get_caa(void)
> +{
> +       return boot_svsm_caa;
> +}
> +
> +static u64 __svsm_get_caa_pa(void)
> +{
> +       return boot_svsm_caa_pa;
> +}
> +
>  /*
>   * Copy a version of this function here - insn-eval.c can't be used in
>   * pre-decompression code.
> @@ -135,6 +145,24 @@ static bool fault_in_kernel_space(unsigned long address)
>  /* Include code for early handlers */
>  #include "../../kernel/sev-shared.c"
>
> +static int svsm_protocol(struct svsm_call *call)
> +{
> +       struct ghcb *ghcb;
> +       int ret;
> +
> +       if (boot_ghcb)
> +               ghcb = boot_ghcb;
> +       else
> +               ghcb = NULL;
> +
> +       do {
> +               ret = ghcb ? __svsm_ghcb_protocol(ghcb, call)
> +                          : __svsm_msr_protocol(call);
> +       } while (ret == SVSM_ERR_BUSY);

Should this loop forever or eventually give up and panic?


> +
> +       return ret;
> +}
> +
>  bool sev_snp_enabled(void)
>  {
>         return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
> @@ -151,8 +179,8 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
>          * If private -> shared then invalidate the page before requesting the
>          * state change in the RMP table.
>          */
> -       if (op == SNP_PAGE_STATE_SHARED && pvalidate(paddr, RMP_PG_SIZE_4K, 0))
> -               sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
> +       if (op == SNP_PAGE_STATE_SHARED)
> +               pvalidate_4k_page(paddr, paddr, 0);
>
>         /* Issue VMGEXIT to change the page state in RMP table. */
>         sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
> @@ -167,8 +195,8 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
>          * Now that page state is changed in the RMP table, validate it so that it is
>          * consistent with the RMP entry.
>          */
> -       if (op == SNP_PAGE_STATE_PRIVATE && pvalidate(paddr, RMP_PG_SIZE_4K, 1))
> -               sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
> +       if (op == SNP_PAGE_STATE_PRIVATE)
> +               pvalidate_4k_page(paddr, paddr, 1);
>  }
>
>  void snp_set_page_private(unsigned long paddr)
> @@ -261,6 +289,12 @@ void sev_es_shutdown_ghcb(void)
>         if (!sev_es_check_cpu_features())
>                 error("SEV-ES CPU Features missing.");
>
> +       /*
> +        * Ensure that the boot GHCB isn't used for the PVALIDATE when running

Why the definite article? Which PVALIDATE is this referring to?

> +        * under an SVSM.
> +        */
> +       boot_ghcb = NULL;
> +
> 
> [ remainder of quoted patch snipped; the full diff appears unchanged in the
>   Patch section below ]


--
-Dionna Glaze, PhD (she/her)
  
Tom Lendacky Jan. 27, 2024, 3:18 p.m. UTC | #2
On 1/26/24 18:59, Dionna Amalie Glaze wrote:
> On Fri, Jan 26, 2024 at 2:18 PM Tom Lendacky <thomas.lendacky@amd.com> wrote:
>>
>> The PVALIDATE instruction can only be performed at VMPL0. An SVSM will
>> be present when running at VMPL1 or a lower privilege level.
>>
>> When an SVSM is present, use the SVSM_CORE_PVALIDATE call to perform
>> memory validation instead of issuing the PVALIDATE instruction directly.
>>
>> The validation of a single 4K page is now explicitly identified as such
>> in the function name, pvalidate_4k_page(). The pvalidate_pages() function
>> is used for validating 1 or more pages at either 4K or 2M in size. Each
>> function, however, determines whether it can issue the PVALIDATE directly
>> or whether the SVSM needs to be invoked.
>>
>> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
>> ---
>>   arch/x86/boot/compressed/sev.c |  42 +++++++-
>>   arch/x86/include/asm/sev.h     |  22 +++++
>>   arch/x86/kernel/sev-shared.c   | 176 ++++++++++++++++++++++++++++++++-
>>   arch/x86/kernel/sev.c          |  25 +++--
>>   4 files changed, 247 insertions(+), 18 deletions(-)
>>
>> diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
>> index 5d2403914ceb..3fbb614c31e0 100644
>> --- a/arch/x86/boot/compressed/sev.c
>> +++ b/arch/x86/boot/compressed/sev.c

>> +static int svsm_protocol(struct svsm_call *call)
>> +{
>> +       struct ghcb *ghcb;
>> +       int ret;
>> +
>> +       if (boot_ghcb)
>> +               ghcb = boot_ghcb;
>> +       else
>> +               ghcb = NULL;
>> +
>> +       do {
>> +               ret = ghcb ? __svsm_ghcb_protocol(ghcb, call)
>> +                          : __svsm_msr_protocol(call);
>> +       } while (ret == SVSM_ERR_BUSY);
> 
> Should this loop forever or eventually give up and panic?

The question becomes how many times before giving up. This would likely 
only happen if the hypervisor is causing an issue for the SVSM, so it 
would be similar to a DoS. I'm open to suggestions, though.

On a side note, that does remind me that this should also be checking for 
SVSM_ERR_INCOMPLETE.
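
A bounded-retry variant, shown purely as an illustration (the retry cap and
the choice to simply return the BUSY error to the caller are assumptions,
and the SVSM_ERR_INCOMPLETE handling mentioned above is not shown):

#define SVSM_BUSY_RETRIES	8	/* hypothetical cap, not in the patch */

static int svsm_protocol(struct svsm_call *call)
{
	struct ghcb *ghcb = boot_ghcb;	/* may still be NULL early in boot */
	unsigned int tries = 0;
	int ret;

	do {
		/* Prefer the GHCB protocol; fall back to the MSR protocol. */
		ret = ghcb ? __svsm_ghcb_protocol(ghcb, call)
			   : __svsm_msr_protocol(call);
	} while (ret == SVSM_ERR_BUSY && ++tries < SVSM_BUSY_RETRIES);

	/* Give up after the cap; callers already terminate on failure. */
	return ret;
}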

> 

>>   void snp_set_page_private(unsigned long paddr)
>> @@ -261,6 +289,12 @@ void sev_es_shutdown_ghcb(void)
>>          if (!sev_es_check_cpu_features())
>>                  error("SEV-ES CPU Features missing.");
>>
>> +       /*
>> +        * Ensure that the boot GHCB isn't used for the PVALIDATE when running
> 
> Why the definite article? Which PVALIDATE is this referring to?

I'll clarify the comment, but it specifically relates to the 
set_page_encrypted() call for the boot_ghcb_page that immediately follows. 
Since the GHCB page is being changed to encrypted, we need to use the MSR 
protocol, which is why the boot_ghcb variable is zeroed out. The comment 
should have said Page State Change rather than PVALIDATE.
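
For example, the clarified comment could read something like this (suggested
wording only, as an illustration; the final patch may word it differently):

	/*
	 * The GHCB page itself is about to be converted back to encrypted
	 * via set_page_encrypted(). Clear boot_ghcb so that the resulting
	 * Page State Change uses the MSR protocol, since the GHCB can't be
	 * used for its own conversion when running under an SVSM.
	 */
	boot_ghcb = NULL;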

Thanks,
Tom

>
  

Patch

diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 5d2403914ceb..3fbb614c31e0 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -38,6 +38,16 @@  static u8 vmpl __section(".data");
 static u64 boot_svsm_caa_pa __section(".data");
 static struct svsm_ca *boot_svsm_caa __section(".data");
 
+static struct svsm_ca *__svsm_get_caa(void)
+{
+	return boot_svsm_caa;
+}
+
+static u64 __svsm_get_caa_pa(void)
+{
+	return boot_svsm_caa_pa;
+}
+
 /*
  * Copy a version of this function here - insn-eval.c can't be used in
  * pre-decompression code.
@@ -135,6 +145,24 @@  static bool fault_in_kernel_space(unsigned long address)
 /* Include code for early handlers */
 #include "../../kernel/sev-shared.c"
 
+static int svsm_protocol(struct svsm_call *call)
+{
+	struct ghcb *ghcb;
+	int ret;
+
+	if (boot_ghcb)
+		ghcb = boot_ghcb;
+	else
+		ghcb = NULL;
+
+	do {
+		ret = ghcb ? __svsm_ghcb_protocol(ghcb, call)
+			   : __svsm_msr_protocol(call);
+	} while (ret == SVSM_ERR_BUSY);
+
+	return ret;
+}
+
 bool sev_snp_enabled(void)
 {
 	return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
@@ -151,8 +179,8 @@  static void __page_state_change(unsigned long paddr, enum psc_op op)
 	 * If private -> shared then invalidate the page before requesting the
 	 * state change in the RMP table.
 	 */
-	if (op == SNP_PAGE_STATE_SHARED && pvalidate(paddr, RMP_PG_SIZE_4K, 0))
-		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+	if (op == SNP_PAGE_STATE_SHARED)
+		pvalidate_4k_page(paddr, paddr, 0);
 
 	/* Issue VMGEXIT to change the page state in RMP table. */
 	sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
@@ -167,8 +195,8 @@  static void __page_state_change(unsigned long paddr, enum psc_op op)
 	 * Now that page state is changed in the RMP table, validate it so that it is
 	 * consistent with the RMP entry.
 	 */
-	if (op == SNP_PAGE_STATE_PRIVATE && pvalidate(paddr, RMP_PG_SIZE_4K, 1))
-		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+	if (op == SNP_PAGE_STATE_PRIVATE)
+		pvalidate_4k_page(paddr, paddr, 1);
 }
 
 void snp_set_page_private(unsigned long paddr)
@@ -261,6 +289,12 @@  void sev_es_shutdown_ghcb(void)
 	if (!sev_es_check_cpu_features())
 		error("SEV-ES CPU Features missing.");
 
+	/*
+	 * Ensure that the boot GHCB isn't used for the PVALIDATE when running
+	 * under an SVSM.
+	 */
+	boot_ghcb = NULL;
+
 	/*
 	 * GHCB Page must be flushed from the cache and mapped encrypted again.
 	 * Otherwise the running kernel will see strange cache effects when
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 2f1e583769fc..dbd7fd041689 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -172,6 +172,27 @@  struct svsm_ca {
 #define SVSM_ERR_INVALID_PARAMETER		0x80000005
 #define SVSM_ERR_INVALID_REQUEST		0x80000006
 #define SVSM_ERR_BUSY				0x80000007
+#define SVSM_PVALIDATE_FAIL_SIZEMISMATCH	0x80001006
+
+/*
+ * The SVSM PVALIDATE related structures
+ */
+struct svsm_pvalidate_entry {
+	u64 page_size		: 2,
+	    action		: 1,
+	    ignore_cf		: 1,
+	    rsvd		: 8,
+	    pfn			: 52;
+};
+
+struct svsm_pvalidate_call {
+	u16 entries;
+	u16 next;
+
+	u8 rsvd1[4];
+
+	struct svsm_pvalidate_entry entry[];
+};
 
 /*
  * SVSM protocol structure
@@ -192,6 +213,7 @@  struct svsm_call {
 
 #define SVSM_CORE_CALL(x)		((0ULL << 32) | (x))
 #define SVSM_CORE_REMAP_CA		0
+#define SVSM_CORE_PVALIDATE		1
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 extern void __sev_es_ist_enter(struct pt_regs *regs);
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index 7e9fa5d8889b..f26e872bc5d0 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -81,6 +81,8 @@  static u32 cpuid_std_range_max __ro_after_init;
 static u32 cpuid_hyp_range_max __ro_after_init;
 static u32 cpuid_ext_range_max __ro_after_init;
 
+static int svsm_protocol(struct svsm_call *call);
+
 static bool __init sev_es_check_cpu_features(void)
 {
 	if (!has_cpuflag(X86_FEATURE_RDRAND)) {
@@ -1181,7 +1183,65 @@  static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
 	}
 }
 
-static void pvalidate_pages(struct snp_psc_desc *desc)
+static int base_pvalidate_4k_page(unsigned long vaddr, bool validate)
+{
+	return pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
+}
+
+static int svsm_pvalidate_4k_page(unsigned long paddr, bool validate)
+{
+	struct svsm_pvalidate_call *pvalidate_call;
+	struct svsm_call call = {};
+	u64 pvalidate_call_pa;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * This can be called very early in the boot, use native functions in
+	 * order to avoid paravirt issues.
+	 */
+	flags = native_save_fl();
+	if (flags & X86_EFLAGS_IF)
+		native_irq_disable();
+
+	call.caa = __svsm_get_caa();
+
+	pvalidate_call = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
+	pvalidate_call_pa = __svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
+
+	pvalidate_call->entries = 1;
+	pvalidate_call->next    = 0;
+	pvalidate_call->entry[0].page_size = RMP_PG_SIZE_4K;
+	pvalidate_call->entry[0].action    = validate;
+	pvalidate_call->entry[0].ignore_cf = 0;
+	pvalidate_call->entry[0].pfn       = paddr >> PAGE_SHIFT;
+
+	/* Protocol 0, Call ID 1 */
+	call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
+	call.rcx = pvalidate_call_pa;
+
+	ret = svsm_protocol(&call);
+
+	if (flags & X86_EFLAGS_IF)
+		native_irq_enable();
+
+	return ret;
+}
+
+static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, bool validate)
+{
+	int ret;
+
+	ret = vmpl ? svsm_pvalidate_4k_page(paddr, validate)
+		   : base_pvalidate_4k_page(vaddr, validate);
+
+	if (ret) {
+		WARN(1, "Failed to validate address 0x%lx ret %d", vaddr, ret);
+		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+	}
+}
+
+static void base_pvalidate_pages(struct snp_psc_desc *desc)
 {
 	struct psc_entry *e;
 	unsigned long vaddr;
@@ -1215,6 +1275,120 @@  static void pvalidate_pages(struct snp_psc_desc *desc)
 	}
 }
 
+static void svsm_pvalidate_pages(struct snp_psc_desc *desc)
+{
+	struct svsm_pvalidate_call *pvalidate_call;
+	struct svsm_pvalidate_entry *pe;
+	unsigned int call_count, i;
+	struct svsm_call call = {};
+	u64 pvalidate_call_pa;
+	struct psc_entry *e;
+	unsigned long flags;
+	unsigned long vaddr;
+	bool action;
+	int ret;
+
+	/*
+	 * This can be called very early in the boot, use native functions in
+	 * order to avoid paravirt issues.
+	 */
+	flags = native_save_fl();
+	if (flags & X86_EFLAGS_IF)
+		native_irq_disable();
+
+	call.caa = __svsm_get_caa();
+
+	pvalidate_call = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
+	pvalidate_call_pa = __svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
+
+	/* Calculate how many entries the CA buffer can hold */
+	call_count = sizeof(call.caa->svsm_buffer);
+	call_count -= offsetof(struct svsm_pvalidate_call, entry);
+	call_count /= sizeof(pvalidate_call->entry[0]);
+
+	/* Protocol 0, Call ID 1 */
+	call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
+	call.rcx = pvalidate_call_pa;
+
+	pvalidate_call->entries = 0;
+	pvalidate_call->next    = 0;
+
+	for (i = 0; i <= desc->hdr.end_entry; i++) {
+		e = &desc->entries[i];
+		pe = &pvalidate_call->entry[pvalidate_call->entries];
+
+		pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+		pe->action    = e->operation == SNP_PAGE_STATE_PRIVATE;
+		pe->ignore_cf = 0;
+		pe->pfn       = e->gfn;
+
+		pvalidate_call->entries++;
+		if (pvalidate_call->entries < call_count && i != desc->hdr.end_entry)
+			continue;
+
+		ret = svsm_protocol(&call);
+		if (ret == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
+		    pvalidate_call->entry[pvalidate_call->next].page_size == RMP_PG_SIZE_2M) {
+			u64 pfn, pfn_end;
+
+			/*
+			 * The "next" field is the index of the failed entry. Calculate the
+			 * index of the entry after the failed entry before the fields are
+			 * cleared so that processing can continue on from that point (take
+			 * into account the for loop adding 1 to the entry).
+			 */
+			i -= pvalidate_call->entries - pvalidate_call->next;
+			i += 1;
+
+			action = pvalidate_call->entry[pvalidate_call->next].action;
+			pfn = pvalidate_call->entry[pvalidate_call->next].pfn;
+			pfn_end = pfn + 511;
+
+			pvalidate_call->entries = 0;
+			pvalidate_call->next    = 0;
+			for (; pfn <= pfn_end; pfn++) {
+				pe = &pvalidate_call->entry[pvalidate_call->entries];
+
+				pe->page_size = RMP_PG_SIZE_4K;
+				pe->action    = action;
+				pe->ignore_cf = 0;
+				pe->pfn       = pfn;
+
+				pvalidate_call->entries++;
+				if (pvalidate_call->entries < call_count && pfn != pfn_end)
+					continue;
+
+				ret = svsm_protocol(&call);
+				if (ret != SVSM_SUCCESS)
+					break;
+
+				pvalidate_call->entries = 0;
+				pvalidate_call->next    = 0;
+			}
+		}
+
+		if (ret != SVSM_SUCCESS) {
+			pe = &pvalidate_call->entry[pvalidate_call->next];
+			vaddr = (unsigned long)pfn_to_kaddr(pe->pfn);
+
+			WARN(1, "Failed to validate address %lx ret=%#x (%d)", vaddr, ret, ret);
+			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+		}
+
+		pvalidate_call->entries = 0;
+		pvalidate_call->next    = 0;
+	}
+
+	if (flags & X86_EFLAGS_IF)
+		native_irq_enable();
+}
+
+static void pvalidate_pages(struct snp_psc_desc *desc)
+{
+	vmpl ? svsm_pvalidate_pages(desc)
+	     : base_pvalidate_pages(desc);
+}
+
 static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
 {
 	int cur_entry, end_entry, ret = 0;
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 3bd7860fbfe1..2fd21090ef6b 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -161,6 +161,12 @@  static struct svsm_ca *__svsm_get_caa(void)
 				       : boot_svsm_caa;
 }
 
+static u64 __svsm_get_caa_pa(void)
+{
+	return sev_cfg.cas_initialized ? this_cpu_read(svsm_caa_pa)
+				       : boot_svsm_caa_pa;
+}
+
 static __always_inline bool on_vc_stack(struct pt_regs *regs)
 {
 	unsigned long sp = regs->sp;
@@ -777,7 +783,6 @@  static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
 {
 	unsigned long paddr_end;
 	u64 val;
-	int ret;
 
 	vaddr = vaddr & PAGE_MASK;
 
@@ -785,12 +790,9 @@  static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
 	paddr_end = paddr + (npages << PAGE_SHIFT);
 
 	while (paddr < paddr_end) {
-		if (op == SNP_PAGE_STATE_SHARED) {
-			/* Page validation must be rescinded before changing to shared */
-			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, false);
-			if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
-				goto e_term;
-		}
+		/* Page validation must be rescinded before changing to shared */
+		if (op == SNP_PAGE_STATE_SHARED)
+			pvalidate_4k_page(vaddr, paddr, false);
 
 		/*
 		 * Use the MSR protocol because this function can be called before
@@ -812,12 +814,9 @@  static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
 			 paddr, GHCB_MSR_PSC_RESP_VAL(val)))
 			goto e_term;
 
-		if (op == SNP_PAGE_STATE_PRIVATE) {
-			/* Page validation must be performed after changing to private */
-			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, true);
-			if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
-				goto e_term;
-		}
+		/* Page validation must be performed after changing to private */
+		if (op == SNP_PAGE_STATE_PRIVATE)
+			pvalidate_4k_page(vaddr, paddr, true);
 
 		vaddr += PAGE_SIZE;
 		paddr += PAGE_SIZE;