[2/2] x86/tdx: Convert shared memory back to private on kexec
Commit Message
TDX guests allocate shared buffers to perform I/O. It is done by
allocating pages normally from the buddy allocator and converting them
to shared with set_memory_decrypted().
The target kernel has no idea what memory is converted this way. It only
sees E820_TYPE_RAM.
Accessing shared memory via private mapping is fatal. It leads to
unrecoverable TD exit.
Walk direct mapping and convert all shared memory back to private.
It makes all RAM private again and target kernel may use it normally.
Skip the conversion on kexec of crashkernel. It uses its own pool of
memory and will not accidentally allocate from the memory the first
kernel made shared.
For crash investigation, it might be useful to access data in the shared
buffers.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
arch/x86/coco/tdx/Makefile | 1 +
arch/x86/coco/tdx/kexec.c | 82 ++++++++++++++++++++++++++++++
arch/x86/include/asm/tdx.h | 4 ++
arch/x86/kernel/machine_kexec_64.c | 2 +
4 files changed, 89 insertions(+)
create mode 100644 arch/x86/coco/tdx/kexec.c
Comments
> +void tdx_kexec_prepare(bool crash)
> +{
> + /*
> + * Crash kernel may want to see data in the shared buffers.
> + * Do not revert them to private on kexec of crash kernel.
> + */
> + if (crash)
> + return;
> +
> + /*
> + * Walk direct mapping and convert all shared memory back to private,
> + * so the target kernel will be able use it normally.
> + */
> + mmap_write_lock(&init_mm);
> + walk_page_range_novma(&init_mm,
> + PAGE_OFFSET,
> + PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT),
> + &unshare_ops, init_mm.pgd, NULL);
> + mmap_write_unlock(&init_mm);
> +}
It looks like the page table walk is done unconditionally when !crash.
I think it's better to check whether this is a TDX guest (either in this function,
or below in machine_kexec()) and just return early if it's not a TDX guest?
[..]
> /*
> @@ -312,6 +313,7 @@ void machine_kexec(struct kimage *image)
> local_irq_disable();
> hw_breakpoint_disable();
> cet_disable();
> + tdx_kexec_prepare(image->type == KEXEC_TYPE_CRASH);
>
> if (image->preserve_context) {
> #ifdef CONFIG_X86_IO_APIC
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += tdx.o tdcall.o
+obj-$(CONFIG_KEXEC_CORE) += kexec.o
new file mode 100644
@@ -0,0 +1,82 @@
+#define pr_fmt(fmt) "tdx: " fmt
+
+#include <linux/pagewalk.h>
+#include <asm/tdx.h>
+#include <asm/x86_init.h>
+
+static inline bool pud_decrypted(pud_t pud)
+{
+ return cc_mkdec(pud_val(pud)) == pud_val(pud);
+}
+
+static inline bool pmd_decrypted(pmd_t pmd)
+{
+ return cc_mkdec(pmd_val(pmd)) == pmd_val(pmd);
+}
+
+static inline bool pte_decrypted(pte_t pte)
+{
+ return cc_mkdec(pte_val(pte)) == pte_val(pte);
+}
+
+static inline void unshare_range(unsigned long start, unsigned long end)
+{
+ int pages = (end - start) / PAGE_SIZE;
+
+ if (!x86_platform.guest.enc_status_change_finish(start, pages, true))
+ pr_err("Failed to unshare range %#lx-%#lx\n", start, end);
+}
+
+static int unshare_pud(pud_t *pud, unsigned long addr, unsigned long next,
+ struct mm_walk *walk)
+{
+ if (pud_decrypted(*pud))
+ unshare_range(addr, next);
+
+ return 0;
+}
+
+static int unshare_pmd(pmd_t *pmd, unsigned long addr, unsigned long next,
+ struct mm_walk *walk)
+{
+ if (pmd_decrypted(*pmd))
+ unshare_range(addr, next);
+
+ return 0;
+}
+
+static int unshare_pte(pte_t *pte, unsigned long addr, unsigned long next,
+ struct mm_walk *walk)
+{
+ if (pte_decrypted(*pte))
+ unshare_range(addr, next);
+
+ return 0;
+}
+
+static const struct mm_walk_ops unshare_ops = {
+ .pud_entry = unshare_pud,
+ .pmd_entry = unshare_pmd,
+ .pte_entry = unshare_pte,
+};
+
+void tdx_kexec_prepare(bool crash)
+{
+ /*
+ * Crash kernel may want to see data in the shared buffers.
+ * Do not revert them to private on kexec of crash kernel.
+ */
+ if (crash)
+ return;
+
+ /*
+ * Walk direct mapping and convert all shared memory back to private,
+ * so the target kernel will be able to use it normally.
+ */
+ mmap_write_lock(&init_mm);
+ walk_page_range_novma(&init_mm,
+ PAGE_OFFSET,
+ PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT),
+ &unshare_ops, init_mm.pgd, NULL);
+ mmap_write_unlock(&init_mm);
+}
@@ -69,6 +69,8 @@ bool tdx_early_handle_ve(struct pt_regs *regs);
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport);
+void tdx_kexec_prepare(bool crash);
+
#else
static inline void tdx_early_init(void) { };
@@ -76,6 +78,8 @@ static inline void tdx_safe_halt(void) { };
static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }
+static inline void tdx_kexec_prepare(bool crash) {}
+
#endif /* CONFIG_INTEL_TDX_GUEST */
#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_INTEL_TDX_GUEST)
@@ -28,6 +28,7 @@
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/cpu.h>
+#include <asm/tdx.h>
#ifdef CONFIG_ACPI
/*
@@ -312,6 +313,7 @@ void machine_kexec(struct kimage *image)
local_irq_disable();
hw_breakpoint_disable();
cet_disable();
+ tdx_kexec_prepare(image->type == KEXEC_TYPE_CRASH);
if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC