[PATCHv8,09/17] x86/mm: Adding callbacks to prepare encrypted memory for kexec
Commit Message
AMD SEV and Intel TDX guests allocate shared buffers for performing I/O.
This is done by allocating pages normally from the buddy allocator and
then converting them to shared using set_memory_decrypted().
On kexec, the second kernel is unaware of which memory has been
converted in this manner. It only sees E820_TYPE_RAM. Accessing shared
memory as private is fatal.
Therefore, the memory state must be reset to its original state before
starting the new kernel with kexec.
The process of converting shared memory back to private occurs in two
steps:
- enc_kexec_stop_conversion() stops new conversions.
- enc_kexec_unshare_mem() unshares all existing shared memory, reverting
it back to private.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
---
 arch/x86/include/asm/x86_init.h |  2 ++
 arch/x86/kernel/crash.c         |  6 ++++++
 arch/x86/kernel/reboot.c        | 12 ++++++++++++
 arch/x86/kernel/x86_init.c      |  4 ++++
 4 files changed, 24 insertions(+)
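For context, a guest-side implementation of the two hooks might look roughly like the sketch below. This is illustrative only and not part of this patch: the names (tdx_kexec_stop_conversion(), tdx_kexec_unshare_mem(), conversion_allowed, conversions_in_progress) and the bookkeeping are assumptions about how a CoCo guest could satisfy the contract described above.

#include <linux/atomic.h>

/* Fragment-style sketch; names and bookkeeping are assumed, not from this patch. */
static atomic_t conversions_in_progress;
static bool conversion_allowed = true;

static void tdx_kexec_stop_conversion(bool crash)
{
	/* Refuse any private<->shared conversion started from now on. */
	conversion_allowed = false;
	barrier();

	/*
	 * In the crash case other CPUs may be stuck in the middle of a
	 * conversion, so waiting for them could hang; only wait for
	 * in-flight conversions to drain on a clean kexec.
	 */
	if (crash)
		return;

	while (atomic_read(&conversions_in_progress))
		cpu_relax();
}

static void tdx_kexec_unshare_mem(void)
{
	/*
	 * Walk whatever record the guest keeps of shared ranges and
	 * convert each one back to private. The walk itself is
	 * guest-specific and omitted here.
	 */
}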
Comments
On 28/02/2024 10:24 am, Kirill A. Shutemov wrote:
> AMD SEV and Intel TDX guests allocate shared buffers for performing I/O.
> This is done by allocating pages normally from the buddy allocator and
> then converting them to shared using set_memory_decrypted().
>
> On kexec, the second kernel is unaware of which memory has been
> converted in this manner. It only sees E820_TYPE_RAM. Accessing shared
> memory as private is fatal.
>
> Therefore, the memory state must be reset to its original state before
> starting the new kernel with kexec.
>
> The process of converting shared memory back to private occurs in two
> steps:
>
> - enc_kexec_stop_conversion() stops new conversions.
>
> - enc_kexec_unshare_mem() unshares all existing shared memory, reverting
> it back to private.
>
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
> ---
Reviewed-by: Kai Huang <kai.huang@intel.com>
@@ -152,6 +152,8 @@ struct x86_guest {
int (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
bool (*enc_tlb_flush_required)(bool enc);
bool (*enc_cache_flush_required)(void);
+ void (*enc_kexec_stop_conversion)(bool crash);
+ void (*enc_kexec_unshare_mem)(void);
};
/**
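A guest would then populate these hooks from its early init path. A minimal sketch, assuming the TDX-side helpers are named tdx_kexec_stop_conversion()/tdx_kexec_unshare_mem() (hypothetical names, not confirmed by this patch):

void __init tdx_early_init(void)
{
	/* ... existing TDX guest setup ... */

	/* Hypothetical helper names; the hook fields are the ones added above. */
	x86_platform.guest.enc_kexec_stop_conversion = tdx_kexec_stop_conversion;
	x86_platform.guest.enc_kexec_unshare_mem = tdx_kexec_unshare_mem;
}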
@@ -128,6 +128,12 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
#ifdef CONFIG_HPET_TIMER
hpet_disable();
#endif
+
+ if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ x86_platform.guest.enc_kexec_stop_conversion(true);
+ x86_platform.guest.enc_kexec_unshare_mem();
+ }
+
crash_save_cpu(regs, safe_smp_processor_id());
}
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
+#include <linux/kexec.h>
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>
@@ -716,6 +717,14 @@ static void native_machine_emergency_restart(void)
void native_machine_shutdown(void)
{
+ /*
+ * Call enc_kexec_stop_conversion() while all CPUs are still active and
+ * interrupts are enabled. This will allow all in-flight memory
+ * conversions to finish cleanly.
+ */
+ if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) && kexec_in_progress)
+ x86_platform.guest.enc_kexec_stop_conversion(false);
+
/* Stop the cpus and apics */
#ifdef CONFIG_X86_IO_APIC
/*
@@ -752,6 +761,9 @@ void native_machine_shutdown(void)
#ifdef CONFIG_X86_64
x86_platform.iommu_shutdown();
#endif
+
+ if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) && kexec_in_progress)
+ x86_platform.guest.enc_kexec_unshare_mem();
}
static void __machine_emergency_restart(int emergency)
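The reason enc_kexec_stop_conversion() must run this early, while all CPUs are online and interrupts are enabled, is that the conversion paths need a chance to drain before anything is unshared. An illustrative pairing on the conversion side, reusing the assumed conversion_allowed/conversions_in_progress bookkeeping from the earlier sketch (again, not part of this patch):

/* Sketch of a guest's enc_status_change_prepare() hook; name and logic are assumed. */
static int tdx_enc_status_change_prepare_sketch(unsigned long vaddr, int npages, bool enc)
{
	atomic_inc(&conversions_in_progress);

	/*
	 * Once kexec has called enc_kexec_stop_conversion(), refuse new
	 * conversions so enc_kexec_unshare_mem() cannot race with them.
	 */
	if (!conversion_allowed) {
		atomic_dec(&conversions_in_progress);
		return -EBUSY;
	}

	return 0;
}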
@@ -135,6 +135,8 @@ static int enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool
static int enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
static bool enc_tlb_flush_required_noop(bool enc) { return false; }
static bool enc_cache_flush_required_noop(void) { return false; }
+static void enc_kexec_stop_conversion_noop(bool crash) {}
+static void enc_kexec_unshare_mem_noop(void) {}
static bool is_private_mmio_noop(u64 addr) {return false; }
struct x86_platform_ops x86_platform __ro_after_init = {
@@ -158,6 +160,8 @@ struct x86_platform_ops x86_platform __ro_after_init = {
.enc_status_change_finish = enc_status_change_finish_noop,
.enc_tlb_flush_required = enc_tlb_flush_required_noop,
.enc_cache_flush_required = enc_cache_flush_required_noop,
+ .enc_kexec_stop_conversion = enc_kexec_stop_conversion_noop,
+ .enc_kexec_unshare_mem = enc_kexec_unshare_mem_noop,
},
};