[RFC,3/5] KVM: TDX: Add base implementation for tdx_vm_move_enc_context_from

Message ID 20230407201921.2703758-4-sagis@google.com
State New
Headers
Series Add TDX intra host migration support |

Commit Message

Sagi Shahar April 7, 2023, 8:19 p.m. UTC
  This should mostly match the logic in sev_vm_move_enc_context_from.

Signed-off-by: Sagi Shahar <sagis@google.com>
---
 arch/x86/kvm/vmx/main.c    | 10 +++++++
 arch/x86/kvm/vmx/tdx.c     | 56 ++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/tdx.h     |  2 ++
 arch/x86/kvm/vmx/x86_ops.h |  5 ++++
 4 files changed, 73 insertions(+)
  

Comments

Zhi Wang April 18, 2023, 6:28 a.m. UTC | #1
On Fri,  7 Apr 2023 20:19:19 +0000
Sagi Shahar <sagis@google.com> wrote:

Is there any reason that TDX doesn't need .vm_copy_enc_context_from? Or is it
going to be deprecated? The patch comments need to be refined according to
Sean's KVM x86 maintainer book.

> This should mostly match the logic in sev_vm_move_enc_context_from.
> 
> Signed-off-by: Sagi Shahar <sagis@google.com>
> ---
>  arch/x86/kvm/vmx/main.c    | 10 +++++++
>  arch/x86/kvm/vmx/tdx.c     | 56 ++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/vmx/tdx.h     |  2 ++
>  arch/x86/kvm/vmx/x86_ops.h |  5 ++++
>  4 files changed, 73 insertions(+)
> 
> diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> index 5b64fe5404958..9d5d0ac465bf6 100644
> --- a/arch/x86/kvm/vmx/main.c
> +++ b/arch/x86/kvm/vmx/main.c
> @@ -979,6 +979,14 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
>  	return tdx_vcpu_ioctl(vcpu, argp);
>  }
>  
> +static int vt_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> +{
> +	if (!is_td(kvm))
> +		return -ENOTTY;
> +
> +	return tdx_vm_move_enc_context_from(kvm, source_fd);
> +}
> +
>  #define VMX_REQUIRED_APICV_INHIBITS		       \
>  (						       \
>         BIT(APICV_INHIBIT_REASON_DISABLE)|	       \
> @@ -1141,6 +1149,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
>  	.dev_mem_enc_ioctl = tdx_dev_ioctl,
>  	.mem_enc_ioctl = vt_mem_enc_ioctl,
>  	.vcpu_mem_enc_ioctl = vt_vcpu_mem_enc_ioctl,
> +
> +	.vm_move_enc_context_from = vt_move_enc_context_from,
>  };
>  
>  struct kvm_x86_init_ops vt_init_ops __initdata = {
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 8af7e4e81c860..0999a6d827c99 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -2826,3 +2826,59 @@ int __init tdx_init(void)
>  		INIT_LIST_HEAD(&per_cpu(associated_tdvcpus, cpu));
>  	return 0;
>  }
> +
> +static __always_inline bool tdx_guest(struct kvm *kvm)
> +{
> +	struct kvm_tdx *tdx_kvm = to_kvm_tdx(kvm);
> +
> +	return tdx_kvm->finalized;
> +}
> +
> +static int tdx_migrate_from(struct kvm *dst, struct kvm *src)
> +{
> +	return -EINVAL;
> +}
> +
> +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> +{
> +	struct kvm_tdx *dst_tdx = to_kvm_tdx(kvm);
> +	struct file *src_kvm_file;
> +	struct kvm_tdx *src_tdx;
> +	struct kvm *src_kvm;
> +	int ret;
> +
> +	src_kvm_file = fget(source_fd);
> +	if (!file_is_kvm(src_kvm_file)) {
> +		ret = -EBADF;
> +		goto out_fput;
> +	}
> +	src_kvm = src_kvm_file->private_data;
> +	src_tdx = to_kvm_tdx(src_kvm);
> +
> +	ret = pre_move_enc_context_from(kvm, src_kvm,
> +					&dst_tdx->migration_in_progress,
> +					&src_tdx->migration_in_progress);
> +	if (ret)
> +		goto out_fput;
> +
> +	if (tdx_guest(kvm) || !tdx_guest(src_kvm)) {
> +		ret = -EINVAL;
> +		goto out_post;
> +	}
> +
> +	ret = tdx_migrate_from(kvm, src_kvm);
> +	if (ret)
> +		goto out_post;
> +
> +	kvm_vm_dead(src_kvm);
> +	ret = 0;
> +
> +out_post:
> +	post_move_enc_context_from(kvm, src_kvm,
> +				 &dst_tdx->migration_in_progress,
> +				 &src_tdx->migration_in_progress);
> +out_fput:
> +	if (src_kvm_file)
> +		fput(src_kvm_file);
> +	return ret;
> +}
> diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> index 71818c5001862..21b7e710be1fd 100644
> --- a/arch/x86/kvm/vmx/tdx.h
> +++ b/arch/x86/kvm/vmx/tdx.h
> @@ -24,6 +24,8 @@ struct kvm_tdx {
>  	atomic_t tdh_mem_track;
>  
>  	u64 tsc_offset;
> +
> +	atomic_t migration_in_progress;
>  };
>  
>  union tdx_exit_reason {
> diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> index d049e0c72ed0c..275f5d75e9bf1 100644
> --- a/arch/x86/kvm/vmx/x86_ops.h
> +++ b/arch/x86/kvm/vmx/x86_ops.h
> @@ -187,6 +187,8 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
>  void tdx_flush_tlb(struct kvm_vcpu *vcpu);
>  int tdx_sept_tlb_remote_flush(struct kvm *kvm);
>  void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
> +
> +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
>  #else
>  static inline int tdx_init(void) { return 0; };
>  static inline int tdx_hardware_setup(struct kvm_x86_ops *x86_ops) { return -ENOSYS; }
> @@ -241,6 +243,9 @@ static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { ret
>  static inline void tdx_flush_tlb(struct kvm_vcpu *vcpu) {}
>  static inline int tdx_sept_tlb_remote_flush(struct kvm *kvm) { return 0; }
>  static inline void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
> +
> +static inline int tdx_vm_move_enc_context_from(struct kvm *kvm,
> +					       unsigned int source_fd) { return -EOPNOTSUPP; }
>  #endif
>  
>  #if defined(CONFIG_INTEL_TDX_HOST) && defined(CONFIG_KVM_SMM)
  
Zhi Wang April 18, 2023, 12:11 p.m. UTC | #2
On Fri,  7 Apr 2023 20:19:19 +0000
Sagi Shahar <sagis@google.com> wrote:

What was the status of the src VM when calling vm_move_enc_context_from?
Is it still active like in common live migration, or has it been paused?

> This should mostly match the logic in sev_vm_move_enc_context_from.
> 
> Signed-off-by: Sagi Shahar <sagis@google.com>
> ---
>  arch/x86/kvm/vmx/main.c    | 10 +++++++
>  arch/x86/kvm/vmx/tdx.c     | 56 ++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/vmx/tdx.h     |  2 ++
>  arch/x86/kvm/vmx/x86_ops.h |  5 ++++
>  4 files changed, 73 insertions(+)
> 
> diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> index 5b64fe5404958..9d5d0ac465bf6 100644
> --- a/arch/x86/kvm/vmx/main.c
> +++ b/arch/x86/kvm/vmx/main.c
> @@ -979,6 +979,14 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
>  	return tdx_vcpu_ioctl(vcpu, argp);
>  }
>  
> +static int vt_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> +{
> +	if (!is_td(kvm))
> +		return -ENOTTY;
> +
> +	return tdx_vm_move_enc_context_from(kvm, source_fd);
> +}
> +
>  #define VMX_REQUIRED_APICV_INHIBITS		       \
>  (						       \
>         BIT(APICV_INHIBIT_REASON_DISABLE)|	       \
> @@ -1141,6 +1149,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
>  	.dev_mem_enc_ioctl = tdx_dev_ioctl,
>  	.mem_enc_ioctl = vt_mem_enc_ioctl,
>  	.vcpu_mem_enc_ioctl = vt_vcpu_mem_enc_ioctl,
> +
> +	.vm_move_enc_context_from = vt_move_enc_context_from,
>  };
>  
>  struct kvm_x86_init_ops vt_init_ops __initdata = {
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 8af7e4e81c860..0999a6d827c99 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -2826,3 +2826,59 @@ int __init tdx_init(void)
>  		INIT_LIST_HEAD(&per_cpu(associated_tdvcpus, cpu));
>  	return 0;
>  }
> +
> +static __always_inline bool tdx_guest(struct kvm *kvm)
> +{
> +	struct kvm_tdx *tdx_kvm = to_kvm_tdx(kvm);
> +
> +	return tdx_kvm->finalized;
> +}
        return is_td_finalized()?
> +
> +static int tdx_migrate_from(struct kvm *dst, struct kvm *src)
> +{
> +	return -EINVAL;
> +}
> +
> +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> +{
> +	struct kvm_tdx *dst_tdx = to_kvm_tdx(kvm);
> +	struct file *src_kvm_file;
> +	struct kvm_tdx *src_tdx;
> +	struct kvm *src_kvm;
> +	int ret;
> +
> +	src_kvm_file = fget(source_fd);
> +	if (!file_is_kvm(src_kvm_file)) {
> +		ret = -EBADF;
> +		goto out_fput;
> +	}
> +	src_kvm = src_kvm_file->private_data;
> +	src_tdx = to_kvm_tdx(src_kvm);
> +
> +	ret = pre_move_enc_context_from(kvm, src_kvm,
> +					&dst_tdx->migration_in_progress,
> +					&src_tdx->migration_in_progress);
> +	if (ret)
> +		goto out_fput;
> +
> +	if (tdx_guest(kvm) || !tdx_guest(src_kvm)) {
> +		ret = -EINVAL;
> +		goto out_post;
> +	}
> +
> +	ret = tdx_migrate_from(kvm, src_kvm);
> +	if (ret)
> +		goto out_post;
> +
> +	kvm_vm_dead(src_kvm);
> +	ret = 0;
> +
> +out_post:
> +	post_move_enc_context_from(kvm, src_kvm,
> +				 &dst_tdx->migration_in_progress,
> +				 &src_tdx->migration_in_progress);
> +out_fput:
> +	if (src_kvm_file)
> +		fput(src_kvm_file);
> +	return ret;
> +}
> diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> index 71818c5001862..21b7e710be1fd 100644
> --- a/arch/x86/kvm/vmx/tdx.h
> +++ b/arch/x86/kvm/vmx/tdx.h
> @@ -24,6 +24,8 @@ struct kvm_tdx {
>  	atomic_t tdh_mem_track;
>  
>  	u64 tsc_offset;
> +
> +	atomic_t migration_in_progress;
>  };
>  
>  union tdx_exit_reason {
> diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> index d049e0c72ed0c..275f5d75e9bf1 100644
> --- a/arch/x86/kvm/vmx/x86_ops.h
> +++ b/arch/x86/kvm/vmx/x86_ops.h
> @@ -187,6 +187,8 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
>  void tdx_flush_tlb(struct kvm_vcpu *vcpu);
>  int tdx_sept_tlb_remote_flush(struct kvm *kvm);
>  void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
> +
> +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
>  #else
>  static inline int tdx_init(void) { return 0; };
>  static inline int tdx_hardware_setup(struct kvm_x86_ops *x86_ops) { return -ENOSYS; }
> @@ -241,6 +243,9 @@ static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { ret
>  static inline void tdx_flush_tlb(struct kvm_vcpu *vcpu) {}
>  static inline int tdx_sept_tlb_remote_flush(struct kvm *kvm) { return 0; }
>  static inline void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
> +
> +static inline int tdx_vm_move_enc_context_from(struct kvm *kvm,
> +					       unsigned int source_fd) { return -EOPNOTSUPP; }
>  #endif
>  
>  #if defined(CONFIG_INTEL_TDX_HOST) && defined(CONFIG_KVM_SMM)
  
Sagi Shahar April 18, 2023, 5:47 p.m. UTC | #3
On Mon, Apr 17, 2023 at 11:28 PM Zhi Wang <zhi.wang.linux@gmail.com> wrote:
>
> On Fri,  7 Apr 2023 20:19:19 +0000
> Sagi Shahar <sagis@google.com> wrote:
>
> Is there any reaon that TDX doesn't need .vm_copy_enc_context_from? Or it is
> going to be deprecated? The patch comments needs to be refined according to
> Sean's KVM x86 maintainer book.

To clarify, there are 2 types of migrations. live migration (between
different hosts) and intra-host (between kvm instances in the same
host) migration. This patchset deals with intra-host migration and
doesn't add support for live migration.

vm_copy_enc_context_from is currently used for setting up the
migration helper for SEV live migration and therefore it is currently
not needed in this patchset.

>
> > This should mostly match the logic in sev_vm_move_enc_context_from.
> >
> > Signed-off-by: Sagi Shahar <sagis@google.com>
> > ---
> >  arch/x86/kvm/vmx/main.c    | 10 +++++++
> >  arch/x86/kvm/vmx/tdx.c     | 56 ++++++++++++++++++++++++++++++++++++++
> >  arch/x86/kvm/vmx/tdx.h     |  2 ++
> >  arch/x86/kvm/vmx/x86_ops.h |  5 ++++
> >  4 files changed, 73 insertions(+)
> >
> > diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> > index 5b64fe5404958..9d5d0ac465bf6 100644
> > --- a/arch/x86/kvm/vmx/main.c
> > +++ b/arch/x86/kvm/vmx/main.c
> > @@ -979,6 +979,14 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
> >       return tdx_vcpu_ioctl(vcpu, argp);
> >  }
> >
> > +static int vt_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> > +{
> > +     if (!is_td(kvm))
> > +             return -ENOTTY;
> > +
> > +     return tdx_vm_move_enc_context_from(kvm, source_fd);
> > +}
> > +
> >  #define VMX_REQUIRED_APICV_INHIBITS                 \
> >  (                                                   \
> >         BIT(APICV_INHIBIT_REASON_DISABLE)|           \
> > @@ -1141,6 +1149,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
> >       .dev_mem_enc_ioctl = tdx_dev_ioctl,
> >       .mem_enc_ioctl = vt_mem_enc_ioctl,
> >       .vcpu_mem_enc_ioctl = vt_vcpu_mem_enc_ioctl,
> > +
> > +     .vm_move_enc_context_from = vt_move_enc_context_from,
> >  };
> >
> >  struct kvm_x86_init_ops vt_init_ops __initdata = {
> > diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> > index 8af7e4e81c860..0999a6d827c99 100644
> > --- a/arch/x86/kvm/vmx/tdx.c
> > +++ b/arch/x86/kvm/vmx/tdx.c
> > @@ -2826,3 +2826,59 @@ int __init tdx_init(void)
> >               INIT_LIST_HEAD(&per_cpu(associated_tdvcpus, cpu));
> >       return 0;
> >  }
> > +
> > +static __always_inline bool tdx_guest(struct kvm *kvm)
> > +{
> > +     struct kvm_tdx *tdx_kvm = to_kvm_tdx(kvm);
> > +
> > +     return tdx_kvm->finalized;
> > +}
> > +
> > +static int tdx_migrate_from(struct kvm *dst, struct kvm *src)
> > +{
> > +     return -EINVAL;
> > +}
> > +
> > +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> > +{
> > +     struct kvm_tdx *dst_tdx = to_kvm_tdx(kvm);
> > +     struct file *src_kvm_file;
> > +     struct kvm_tdx *src_tdx;
> > +     struct kvm *src_kvm;
> > +     int ret;
> > +
> > +     src_kvm_file = fget(source_fd);
> > +     if (!file_is_kvm(src_kvm_file)) {
> > +             ret = -EBADF;
> > +             goto out_fput;
> > +     }
> > +     src_kvm = src_kvm_file->private_data;
> > +     src_tdx = to_kvm_tdx(src_kvm);
> > +
> > +     ret = pre_move_enc_context_from(kvm, src_kvm,
> > +                                     &dst_tdx->migration_in_progress,
> > +                                     &src_tdx->migration_in_progress);
> > +     if (ret)
> > +             goto out_fput;
> > +
> > +     if (tdx_guest(kvm) || !tdx_guest(src_kvm)) {
> > +             ret = -EINVAL;
> > +             goto out_post;
> > +     }
> > +
> > +     ret = tdx_migrate_from(kvm, src_kvm);
> > +     if (ret)
> > +             goto out_post;
> > +
> > +     kvm_vm_dead(src_kvm);
> > +     ret = 0;
> > +
> > +out_post:
> > +     post_move_enc_context_from(kvm, src_kvm,
> > +                              &dst_tdx->migration_in_progress,
> > +                              &src_tdx->migration_in_progress);
> > +out_fput:
> > +     if (src_kvm_file)
> > +             fput(src_kvm_file);
> > +     return ret;
> > +}
> > diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> > index 71818c5001862..21b7e710be1fd 100644
> > --- a/arch/x86/kvm/vmx/tdx.h
> > +++ b/arch/x86/kvm/vmx/tdx.h
> > @@ -24,6 +24,8 @@ struct kvm_tdx {
> >       atomic_t tdh_mem_track;
> >
> >       u64 tsc_offset;
> > +
> > +     atomic_t migration_in_progress;
> >  };
> >
> >  union tdx_exit_reason {
> > diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> > index d049e0c72ed0c..275f5d75e9bf1 100644
> > --- a/arch/x86/kvm/vmx/x86_ops.h
> > +++ b/arch/x86/kvm/vmx/x86_ops.h
> > @@ -187,6 +187,8 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
> >  void tdx_flush_tlb(struct kvm_vcpu *vcpu);
> >  int tdx_sept_tlb_remote_flush(struct kvm *kvm);
> >  void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
> > +
> > +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
> >  #else
> >  static inline int tdx_init(void) { return 0; };
> >  static inline int tdx_hardware_setup(struct kvm_x86_ops *x86_ops) { return -ENOSYS; }
> > @@ -241,6 +243,9 @@ static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { ret
> >  static inline void tdx_flush_tlb(struct kvm_vcpu *vcpu) {}
> >  static inline int tdx_sept_tlb_remote_flush(struct kvm *kvm) { return 0; }
> >  static inline void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
> > +
> > +static inline int tdx_vm_move_enc_context_from(struct kvm *kvm,
> > +                                              unsigned int source_fd) { return -EOPNOTSUPP; }
> >  #endif
> >
> >  #if defined(CONFIG_INTEL_TDX_HOST) && defined(CONFIG_KVM_SMM)
>
  
Sagi Shahar April 18, 2023, 5:51 p.m. UTC | #4
On Tue, Apr 18, 2023 at 5:12 AM Zhi Wang <zhi.wang.linux@gmail.com> wrote:
>
> On Fri,  7 Apr 2023 20:19:19 +0000
> Sagi Shahar <sagis@google.com> wrote:
>
> What was the status of the src VM when calling the vm_move_enc_context_from?
> Is it still active like common live migration or it has been paused?
>

Yes the source VM is still active like in the live migration case.
You can also see that we check that the source VM is finalized when we
call tdx_guest before migrating the state.

> > This should mostly match the logic in sev_vm_move_enc_context_from.
> >
> > Signed-off-by: Sagi Shahar <sagis@google.com>
> > ---
> >  arch/x86/kvm/vmx/main.c    | 10 +++++++
> >  arch/x86/kvm/vmx/tdx.c     | 56 ++++++++++++++++++++++++++++++++++++++
> >  arch/x86/kvm/vmx/tdx.h     |  2 ++
> >  arch/x86/kvm/vmx/x86_ops.h |  5 ++++
> >  4 files changed, 73 insertions(+)
> >
> > diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> > index 5b64fe5404958..9d5d0ac465bf6 100644
> > --- a/arch/x86/kvm/vmx/main.c
> > +++ b/arch/x86/kvm/vmx/main.c
> > @@ -979,6 +979,14 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
> >       return tdx_vcpu_ioctl(vcpu, argp);
> >  }
> >
> > +static int vt_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> > +{
> > +     if (!is_td(kvm))
> > +             return -ENOTTY;
> > +
> > +     return tdx_vm_move_enc_context_from(kvm, source_fd);
> > +}
> > +
> >  #define VMX_REQUIRED_APICV_INHIBITS                 \
> >  (                                                   \
> >         BIT(APICV_INHIBIT_REASON_DISABLE)|           \
> > @@ -1141,6 +1149,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
> >       .dev_mem_enc_ioctl = tdx_dev_ioctl,
> >       .mem_enc_ioctl = vt_mem_enc_ioctl,
> >       .vcpu_mem_enc_ioctl = vt_vcpu_mem_enc_ioctl,
> > +
> > +     .vm_move_enc_context_from = vt_move_enc_context_from,
> >  };
> >
> >  struct kvm_x86_init_ops vt_init_ops __initdata = {
> > diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> > index 8af7e4e81c860..0999a6d827c99 100644
> > --- a/arch/x86/kvm/vmx/tdx.c
> > +++ b/arch/x86/kvm/vmx/tdx.c
> > @@ -2826,3 +2826,59 @@ int __init tdx_init(void)
> >               INIT_LIST_HEAD(&per_cpu(associated_tdvcpus, cpu));
> >       return 0;
> >  }
> > +
> > +static __always_inline bool tdx_guest(struct kvm *kvm)
> > +{
> > +     struct kvm_tdx *tdx_kvm = to_kvm_tdx(kvm);
> > +
> > +     return tdx_kvm->finalized;
> > +}
>         return is_td_finalized()?
> > +
> > +static int tdx_migrate_from(struct kvm *dst, struct kvm *src)
> > +{
> > +     return -EINVAL;
> > +}
> > +
> > +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> > +{
> > +     struct kvm_tdx *dst_tdx = to_kvm_tdx(kvm);
> > +     struct file *src_kvm_file;
> > +     struct kvm_tdx *src_tdx;
> > +     struct kvm *src_kvm;
> > +     int ret;
> > +
> > +     src_kvm_file = fget(source_fd);
> > +     if (!file_is_kvm(src_kvm_file)) {
> > +             ret = -EBADF;
> > +             goto out_fput;
> > +     }
> > +     src_kvm = src_kvm_file->private_data;
> > +     src_tdx = to_kvm_tdx(src_kvm);
> > +
> > +     ret = pre_move_enc_context_from(kvm, src_kvm,
> > +                                     &dst_tdx->migration_in_progress,
> > +                                     &src_tdx->migration_in_progress);
> > +     if (ret)
> > +             goto out_fput;
> > +
> > +     if (tdx_guest(kvm) || !tdx_guest(src_kvm)) {
> > +             ret = -EINVAL;
> > +             goto out_post;
> > +     }
> > +
> > +     ret = tdx_migrate_from(kvm, src_kvm);
> > +     if (ret)
> > +             goto out_post;
> > +
> > +     kvm_vm_dead(src_kvm);
> > +     ret = 0;
> > +
> > +out_post:
> > +     post_move_enc_context_from(kvm, src_kvm,
> > +                              &dst_tdx->migration_in_progress,
> > +                              &src_tdx->migration_in_progress);
> > +out_fput:
> > +     if (src_kvm_file)
> > +             fput(src_kvm_file);
> > +     return ret;
> > +}
> > diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> > index 71818c5001862..21b7e710be1fd 100644
> > --- a/arch/x86/kvm/vmx/tdx.h
> > +++ b/arch/x86/kvm/vmx/tdx.h
> > @@ -24,6 +24,8 @@ struct kvm_tdx {
> >       atomic_t tdh_mem_track;
> >
> >       u64 tsc_offset;
> > +
> > +     atomic_t migration_in_progress;
> >  };
> >
> >  union tdx_exit_reason {
> > diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> > index d049e0c72ed0c..275f5d75e9bf1 100644
> > --- a/arch/x86/kvm/vmx/x86_ops.h
> > +++ b/arch/x86/kvm/vmx/x86_ops.h
> > @@ -187,6 +187,8 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
> >  void tdx_flush_tlb(struct kvm_vcpu *vcpu);
> >  int tdx_sept_tlb_remote_flush(struct kvm *kvm);
> >  void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
> > +
> > +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
> >  #else
> >  static inline int tdx_init(void) { return 0; };
> >  static inline int tdx_hardware_setup(struct kvm_x86_ops *x86_ops) { return -ENOSYS; }
> > @@ -241,6 +243,9 @@ static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { ret
> >  static inline void tdx_flush_tlb(struct kvm_vcpu *vcpu) {}
> >  static inline int tdx_sept_tlb_remote_flush(struct kvm *kvm) { return 0; }
> >  static inline void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
> > +
> > +static inline int tdx_vm_move_enc_context_from(struct kvm *kvm,
> > +                                              unsigned int source_fd) { return -EOPNOTSUPP; }
> >  #endif
> >
> >  #if defined(CONFIG_INTEL_TDX_HOST) && defined(CONFIG_KVM_SMM)
>
  
Zhi Wang April 19, 2023, 6:34 a.m. UTC | #5
On Tue, 18 Apr 2023 10:47:44 -0700
Sagi Shahar <sagis@google.com> wrote:

> On Mon, Apr 17, 2023 at 11:28 PM Zhi Wang <zhi.wang.linux@gmail.com> wrote:
> >
> > On Fri,  7 Apr 2023 20:19:19 +0000
> > Sagi Shahar <sagis@google.com> wrote:
> >
> > Is there any reaon that TDX doesn't need .vm_copy_enc_context_from? Or it is
> > going to be deprecated? The patch comments needs to be refined according to
> > Sean's KVM x86 maintainer book.
> 
> To clarify, there are 2 types of migrations. live migration (between
> different hosts) and intra-host (between kvm instances in the same
> host) migration. This patchset deals with intra-host migration and
> doesn't add support for live migration.
> 
> vm_copy_enc_context_from is currently used for setting up the
> migration helper for SEV live migration and therefore it is currently
> not needed in this patcheset.

Out of curiosity, is this migration helper you mentioned here also
a SEV VM?
> 
> >
> > > This should mostly match the logic in sev_vm_move_enc_context_from.
> > >
> > > Signed-off-by: Sagi Shahar <sagis@google.com>
> > > ---
> > >  arch/x86/kvm/vmx/main.c    | 10 +++++++
> > >  arch/x86/kvm/vmx/tdx.c     | 56 ++++++++++++++++++++++++++++++++++++++
> > >  arch/x86/kvm/vmx/tdx.h     |  2 ++
> > >  arch/x86/kvm/vmx/x86_ops.h |  5 ++++
> > >  4 files changed, 73 insertions(+)
> > >
> > > diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> > > index 5b64fe5404958..9d5d0ac465bf6 100644
> > > --- a/arch/x86/kvm/vmx/main.c
> > > +++ b/arch/x86/kvm/vmx/main.c
> > > @@ -979,6 +979,14 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
> > >       return tdx_vcpu_ioctl(vcpu, argp);
> > >  }
> > >
> > > +static int vt_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> > > +{
> > > +     if (!is_td(kvm))
> > > +             return -ENOTTY;
> > > +
> > > +     return tdx_vm_move_enc_context_from(kvm, source_fd);
> > > +}
> > > +
> > >  #define VMX_REQUIRED_APICV_INHIBITS                 \
> > >  (                                                   \
> > >         BIT(APICV_INHIBIT_REASON_DISABLE)|           \
> > > @@ -1141,6 +1149,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
> > >       .dev_mem_enc_ioctl = tdx_dev_ioctl,
> > >       .mem_enc_ioctl = vt_mem_enc_ioctl,
> > >       .vcpu_mem_enc_ioctl = vt_vcpu_mem_enc_ioctl,
> > > +
> > > +     .vm_move_enc_context_from = vt_move_enc_context_from,
> > >  };
> > >
> > >  struct kvm_x86_init_ops vt_init_ops __initdata = {
> > > diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> > > index 8af7e4e81c860..0999a6d827c99 100644
> > > --- a/arch/x86/kvm/vmx/tdx.c
> > > +++ b/arch/x86/kvm/vmx/tdx.c
> > > @@ -2826,3 +2826,59 @@ int __init tdx_init(void)
> > >               INIT_LIST_HEAD(&per_cpu(associated_tdvcpus, cpu));
> > >       return 0;
> > >  }
> > > +
> > > +static __always_inline bool tdx_guest(struct kvm *kvm)
> > > +{
> > > +     struct kvm_tdx *tdx_kvm = to_kvm_tdx(kvm);
> > > +
> > > +     return tdx_kvm->finalized;
> > > +}
> > > +
> > > +static int tdx_migrate_from(struct kvm *dst, struct kvm *src)
> > > +{
> > > +     return -EINVAL;
> > > +}
> > > +
> > > +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> > > +{
> > > +     struct kvm_tdx *dst_tdx = to_kvm_tdx(kvm);
> > > +     struct file *src_kvm_file;
> > > +     struct kvm_tdx *src_tdx;
> > > +     struct kvm *src_kvm;
> > > +     int ret;
> > > +
> > > +     src_kvm_file = fget(source_fd);
> > > +     if (!file_is_kvm(src_kvm_file)) {
> > > +             ret = -EBADF;
> > > +             goto out_fput;
> > > +     }
> > > +     src_kvm = src_kvm_file->private_data;
> > > +     src_tdx = to_kvm_tdx(src_kvm);
> > > +
> > > +     ret = pre_move_enc_context_from(kvm, src_kvm,
> > > +                                     &dst_tdx->migration_in_progress,
> > > +                                     &src_tdx->migration_in_progress);
> > > +     if (ret)
> > > +             goto out_fput;
> > > +
> > > +     if (tdx_guest(kvm) || !tdx_guest(src_kvm)) {
> > > +             ret = -EINVAL;
> > > +             goto out_post;
> > > +     }
> > > +
> > > +     ret = tdx_migrate_from(kvm, src_kvm);
> > > +     if (ret)
> > > +             goto out_post;
> > > +
> > > +     kvm_vm_dead(src_kvm);
> > > +     ret = 0;
> > > +
> > > +out_post:
> > > +     post_move_enc_context_from(kvm, src_kvm,
> > > +                              &dst_tdx->migration_in_progress,
> > > +                              &src_tdx->migration_in_progress);
> > > +out_fput:
> > > +     if (src_kvm_file)
> > > +             fput(src_kvm_file);
> > > +     return ret;
> > > +}
> > > diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> > > index 71818c5001862..21b7e710be1fd 100644
> > > --- a/arch/x86/kvm/vmx/tdx.h
> > > +++ b/arch/x86/kvm/vmx/tdx.h
> > > @@ -24,6 +24,8 @@ struct kvm_tdx {
> > >       atomic_t tdh_mem_track;
> > >
> > >       u64 tsc_offset;
> > > +
> > > +     atomic_t migration_in_progress;
> > >  };
> > >
> > >  union tdx_exit_reason {
> > > diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> > > index d049e0c72ed0c..275f5d75e9bf1 100644
> > > --- a/arch/x86/kvm/vmx/x86_ops.h
> > > +++ b/arch/x86/kvm/vmx/x86_ops.h
> > > @@ -187,6 +187,8 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
> > >  void tdx_flush_tlb(struct kvm_vcpu *vcpu);
> > >  int tdx_sept_tlb_remote_flush(struct kvm *kvm);
> > >  void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
> > > +
> > > +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
> > >  #else
> > >  static inline int tdx_init(void) { return 0; };
> > >  static inline int tdx_hardware_setup(struct kvm_x86_ops *x86_ops) { return -ENOSYS; }
> > > @@ -241,6 +243,9 @@ static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { ret
> > >  static inline void tdx_flush_tlb(struct kvm_vcpu *vcpu) {}
> > >  static inline int tdx_sept_tlb_remote_flush(struct kvm *kvm) { return 0; }
> > >  static inline void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
> > > +
> > > +static inline int tdx_vm_move_enc_context_from(struct kvm *kvm,
> > > +                                              unsigned int source_fd) { return -EOPNOTSUPP; }
> > >  #endif
> > >
> > >  #if defined(CONFIG_INTEL_TDX_HOST) && defined(CONFIG_KVM_SMM)
> >
  
Sagi Shahar April 27, 2023, 9:25 p.m. UTC | #6
On Tue, Apr 18, 2023 at 11:34 PM Zhi Wang <zhi.wang.linux@gmail.com> wrote:
>
> On Tue, 18 Apr 2023 10:47:44 -0700
> Sagi Shahar <sagis@google.com> wrote:
>
> > On Mon, Apr 17, 2023 at 11:28 PM Zhi Wang <zhi.wang.linux@gmail.com> wrote:
> > >
> > > On Fri,  7 Apr 2023 20:19:19 +0000
> > > Sagi Shahar <sagis@google.com> wrote:
> > >
> > > Is there any reaon that TDX doesn't need .vm_copy_enc_context_from? Or it is
> > > going to be deprecated? The patch comments needs to be refined according to
> > > Sean's KVM x86 maintainer book.
> >
> > To clarify, there are 2 types of migrations. live migration (between
> > different hosts) and intra-host (between kvm instances in the same
> > host) migration. This patchset deals with intra-host migration and
> > doesn't add support for live migration.
> >
> > vm_copy_enc_context_from is currently used for setting up the
> > migration helper for SEV live migration and therefore it is currently
> > not needed in this patcheset.
>
> Out of curiosity, Is this the migration helper you mentioned here also
> a SEV VM?

I'm not that familiar with SEV migration but from what I understand
the answer is "not exactly".
It's a guest process that runs as part of the SEV VM firmware.

There's some public information about it here:
https://lpc.events/event/11/contributions/958/attachments/769/1448/Live%20migration%20of%20confidential%20guests_LPC2021.pdf
> >
> > >
> > > > This should mostly match the logic in sev_vm_move_enc_context_from.
> > > >
> > > > Signed-off-by: Sagi Shahar <sagis@google.com>
> > > > ---
> > > >  arch/x86/kvm/vmx/main.c    | 10 +++++++
> > > >  arch/x86/kvm/vmx/tdx.c     | 56 ++++++++++++++++++++++++++++++++++++++
> > > >  arch/x86/kvm/vmx/tdx.h     |  2 ++
> > > >  arch/x86/kvm/vmx/x86_ops.h |  5 ++++
> > > >  4 files changed, 73 insertions(+)
> > > >
> > > > diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> > > > index 5b64fe5404958..9d5d0ac465bf6 100644
> > > > --- a/arch/x86/kvm/vmx/main.c
> > > > +++ b/arch/x86/kvm/vmx/main.c
> > > > @@ -979,6 +979,14 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
> > > >       return tdx_vcpu_ioctl(vcpu, argp);
> > > >  }
> > > >
> > > > +static int vt_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> > > > +{
> > > > +     if (!is_td(kvm))
> > > > +             return -ENOTTY;
> > > > +
> > > > +     return tdx_vm_move_enc_context_from(kvm, source_fd);
> > > > +}
> > > > +
> > > >  #define VMX_REQUIRED_APICV_INHIBITS                 \
> > > >  (                                                   \
> > > >         BIT(APICV_INHIBIT_REASON_DISABLE)|           \
> > > > @@ -1141,6 +1149,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
> > > >       .dev_mem_enc_ioctl = tdx_dev_ioctl,
> > > >       .mem_enc_ioctl = vt_mem_enc_ioctl,
> > > >       .vcpu_mem_enc_ioctl = vt_vcpu_mem_enc_ioctl,
> > > > +
> > > > +     .vm_move_enc_context_from = vt_move_enc_context_from,
> > > >  };
> > > >
> > > >  struct kvm_x86_init_ops vt_init_ops __initdata = {
> > > > diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> > > > index 8af7e4e81c860..0999a6d827c99 100644
> > > > --- a/arch/x86/kvm/vmx/tdx.c
> > > > +++ b/arch/x86/kvm/vmx/tdx.c
> > > > @@ -2826,3 +2826,59 @@ int __init tdx_init(void)
> > > >               INIT_LIST_HEAD(&per_cpu(associated_tdvcpus, cpu));
> > > >       return 0;
> > > >  }
> > > > +
> > > > +static __always_inline bool tdx_guest(struct kvm *kvm)
> > > > +{
> > > > +     struct kvm_tdx *tdx_kvm = to_kvm_tdx(kvm);
> > > > +
> > > > +     return tdx_kvm->finalized;
> > > > +}
> > > > +
> > > > +static int tdx_migrate_from(struct kvm *dst, struct kvm *src)
> > > > +{
> > > > +     return -EINVAL;
> > > > +}
> > > > +
> > > > +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> > > > +{
> > > > +     struct kvm_tdx *dst_tdx = to_kvm_tdx(kvm);
> > > > +     struct file *src_kvm_file;
> > > > +     struct kvm_tdx *src_tdx;
> > > > +     struct kvm *src_kvm;
> > > > +     int ret;
> > > > +
> > > > +     src_kvm_file = fget(source_fd);
> > > > +     if (!file_is_kvm(src_kvm_file)) {
> > > > +             ret = -EBADF;
> > > > +             goto out_fput;
> > > > +     }
> > > > +     src_kvm = src_kvm_file->private_data;
> > > > +     src_tdx = to_kvm_tdx(src_kvm);
> > > > +
> > > > +     ret = pre_move_enc_context_from(kvm, src_kvm,
> > > > +                                     &dst_tdx->migration_in_progress,
> > > > +                                     &src_tdx->migration_in_progress);
> > > > +     if (ret)
> > > > +             goto out_fput;
> > > > +
> > > > +     if (tdx_guest(kvm) || !tdx_guest(src_kvm)) {
> > > > +             ret = -EINVAL;
> > > > +             goto out_post;
> > > > +     }
> > > > +
> > > > +     ret = tdx_migrate_from(kvm, src_kvm);
> > > > +     if (ret)
> > > > +             goto out_post;
> > > > +
> > > > +     kvm_vm_dead(src_kvm);
> > > > +     ret = 0;
> > > > +
> > > > +out_post:
> > > > +     post_move_enc_context_from(kvm, src_kvm,
> > > > +                              &dst_tdx->migration_in_progress,
> > > > +                              &src_tdx->migration_in_progress);
> > > > +out_fput:
> > > > +     if (src_kvm_file)
> > > > +             fput(src_kvm_file);
> > > > +     return ret;
> > > > +}
> > > > diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> > > > index 71818c5001862..21b7e710be1fd 100644
> > > > --- a/arch/x86/kvm/vmx/tdx.h
> > > > +++ b/arch/x86/kvm/vmx/tdx.h
> > > > @@ -24,6 +24,8 @@ struct kvm_tdx {
> > > >       atomic_t tdh_mem_track;
> > > >
> > > >       u64 tsc_offset;
> > > > +
> > > > +     atomic_t migration_in_progress;
> > > >  };
> > > >
> > > >  union tdx_exit_reason {
> > > > diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> > > > index d049e0c72ed0c..275f5d75e9bf1 100644
> > > > --- a/arch/x86/kvm/vmx/x86_ops.h
> > > > +++ b/arch/x86/kvm/vmx/x86_ops.h
> > > > @@ -187,6 +187,8 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
> > > >  void tdx_flush_tlb(struct kvm_vcpu *vcpu);
> > > >  int tdx_sept_tlb_remote_flush(struct kvm *kvm);
> > > >  void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
> > > > +
> > > > +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
> > > >  #else
> > > >  static inline int tdx_init(void) { return 0; };
> > > >  static inline int tdx_hardware_setup(struct kvm_x86_ops *x86_ops) { return -ENOSYS; }
> > > > @@ -241,6 +243,9 @@ static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { ret
> > > >  static inline void tdx_flush_tlb(struct kvm_vcpu *vcpu) {}
> > > >  static inline int tdx_sept_tlb_remote_flush(struct kvm *kvm) { return 0; }
> > > >  static inline void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
> > > > +
> > > > > +static inline int tdx_vm_move_enc_context_from(struct kvm *kvm,
> > > > > +                                            unsigned int source_fd) { return -EOPNOTSUPP; }
> > > >  #endif
> > > >
> > > >  #if defined(CONFIG_INTEL_TDX_HOST) && defined(CONFIG_KVM_SMM)
> > >
>
  
Zhi Wang April 28, 2023, 4:08 p.m. UTC | #7
On Thu, 27 Apr 2023 14:25:41 -0700
Sagi Shahar <sagis@google.com> wrote:

> On Tue, Apr 18, 2023 at 11:34 PM Zhi Wang <zhi.wang.linux@gmail.com> wrote:
> >
> > On Tue, 18 Apr 2023 10:47:44 -0700
> > Sagi Shahar <sagis@google.com> wrote:
> >
> > > On Mon, Apr 17, 2023 at 11:28 PM Zhi Wang <zhi.wang.linux@gmail.com> wrote:
> > > >
> > > > On Fri,  7 Apr 2023 20:19:19 +0000
> > > > Sagi Shahar <sagis@google.com> wrote:
> > > >
> > > > Is there any reason that TDX doesn't need .vm_copy_enc_context_from? Or is it
> > > > going to be deprecated? The patch comments need to be refined according to
> > > > Sean's KVM x86 maintainer book.
> > >
> > > To clarify, there are 2 types of migrations. live migration (between
> > > different hosts) and intra-host (between kvm instances in the same
> > > host) migration. This patchset deals with intra-host migration and
> > > doesn't add support for live migration.
> > >
> > > vm_copy_enc_context_from is currently used for setting up the
> > > migration helper for SEV live migration and therefore it is currently
> > > not needed in this patcheset.
> >
> > Out of curiosity, Is this the migration helper you mentioned here also
> > a SEV VM?
> 
> I'm not that familiar with SEV migration but from what I understand
> the answer is "not exactly".
> It's a guest process that runs as part of the SEV VM firmware.
> 
> There's some public information about it here:
> https://lpc.events/event/11/contributions/958/attachments/769/1448/Live%20migration%20of%20confidential%20guests_LPC2021.pdf

Thanks so much for the information. I spent some time reading and digging
around, but it didn't talk about how the callback will be used. It would be
nice to see the whole picture; then I guess I will have more comments.

> > >
> > > >
> > > > > This should mostly match the logic in sev_vm_move_enc_context_from.
> > > > >
> > > > > Signed-off-by: Sagi Shahar <sagis@google.com>
> > > > > ---
> > > > >  arch/x86/kvm/vmx/main.c    | 10 +++++++
> > > > >  arch/x86/kvm/vmx/tdx.c     | 56 ++++++++++++++++++++++++++++++++++++++
> > > > >  arch/x86/kvm/vmx/tdx.h     |  2 ++
> > > > >  arch/x86/kvm/vmx/x86_ops.h |  5 ++++
> > > > >  4 files changed, 73 insertions(+)
> > > > >
> > > > > diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> > > > > index 5b64fe5404958..9d5d0ac465bf6 100644
> > > > > --- a/arch/x86/kvm/vmx/main.c
> > > > > +++ b/arch/x86/kvm/vmx/main.c
> > > > > @@ -979,6 +979,14 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
> > > > >       return tdx_vcpu_ioctl(vcpu, argp);
> > > > >  }
> > > > >
> > > > > +static int vt_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> > > > > +{
> > > > > +     if (!is_td(kvm))
> > > > > +             return -ENOTTY;
> > > > > +
> > > > > +     return tdx_vm_move_enc_context_from(kvm, source_fd);
> > > > > +}
> > > > > +
> > > > >  #define VMX_REQUIRED_APICV_INHIBITS                 \
> > > > >  (                                                   \
> > > > >         BIT(APICV_INHIBIT_REASON_DISABLE)|           \
> > > > > @@ -1141,6 +1149,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
> > > > >       .dev_mem_enc_ioctl = tdx_dev_ioctl,
> > > > >       .mem_enc_ioctl = vt_mem_enc_ioctl,
> > > > >       .vcpu_mem_enc_ioctl = vt_vcpu_mem_enc_ioctl,
> > > > > +
> > > > > +     .vm_move_enc_context_from = vt_move_enc_context_from,
> > > > >  };
> > > > >
> > > > >  struct kvm_x86_init_ops vt_init_ops __initdata = {
> > > > > diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> > > > > index 8af7e4e81c860..0999a6d827c99 100644
> > > > > --- a/arch/x86/kvm/vmx/tdx.c
> > > > > +++ b/arch/x86/kvm/vmx/tdx.c
> > > > > @@ -2826,3 +2826,59 @@ int __init tdx_init(void)
> > > > >               INIT_LIST_HEAD(&per_cpu(associated_tdvcpus, cpu));
> > > > >       return 0;
> > > > >  }
> > > > > +
> > > > > +static __always_inline bool tdx_guest(struct kvm *kvm)
> > > > > +{
> > > > > +     struct kvm_tdx *tdx_kvm = to_kvm_tdx(kvm);
> > > > > +
> > > > > +     return tdx_kvm->finalized;
> > > > > +}
> > > > > +
> > > > > +static int tdx_migrate_from(struct kvm *dst, struct kvm *src)
> > > > > +{
> > > > > +     return -EINVAL;
> > > > > +}
> > > > > +
> > > > > +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> > > > > +{
> > > > > +     struct kvm_tdx *dst_tdx = to_kvm_tdx(kvm);
> > > > > +     struct file *src_kvm_file;
> > > > > +     struct kvm_tdx *src_tdx;
> > > > > +     struct kvm *src_kvm;
> > > > > +     int ret;
> > > > > +
> > > > > +     src_kvm_file = fget(source_fd);
> > > > > +     if (!file_is_kvm(src_kvm_file)) {
> > > > > +             ret = -EBADF;
> > > > > +             goto out_fput;
> > > > > +     }
> > > > > +     src_kvm = src_kvm_file->private_data;
> > > > > +     src_tdx = to_kvm_tdx(src_kvm);
> > > > > +
> > > > > +     ret = pre_move_enc_context_from(kvm, src_kvm,
> > > > > +                                     &dst_tdx->migration_in_progress,
> > > > > +                                     &src_tdx->migration_in_progress);
> > > > > +     if (ret)
> > > > > +             goto out_fput;
> > > > > +
> > > > > +     if (tdx_guest(kvm) || !tdx_guest(src_kvm)) {
> > > > > +             ret = -EINVAL;
> > > > > +             goto out_post;
> > > > > +     }
> > > > > +
> > > > > +     ret = tdx_migrate_from(kvm, src_kvm);
> > > > > +     if (ret)
> > > > > +             goto out_post;
> > > > > +
> > > > > +     kvm_vm_dead(src_kvm);
> > > > > +     ret = 0;
> > > > > +
> > > > > +out_post:
> > > > > +     post_move_enc_context_from(kvm, src_kvm,
> > > > > +                              &dst_tdx->migration_in_progress,
> > > > > +                              &src_tdx->migration_in_progress);
> > > > > +out_fput:
> > > > > +     if (src_kvm_file)
> > > > > +             fput(src_kvm_file);
> > > > > +     return ret;
> > > > > +}
> > > > > diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> > > > > index 71818c5001862..21b7e710be1fd 100644
> > > > > --- a/arch/x86/kvm/vmx/tdx.h
> > > > > +++ b/arch/x86/kvm/vmx/tdx.h
> > > > > @@ -24,6 +24,8 @@ struct kvm_tdx {
> > > > >       atomic_t tdh_mem_track;
> > > > >
> > > > >       u64 tsc_offset;
> > > > > +
> > > > > +     atomic_t migration_in_progress;
> > > > >  };
> > > > >
> > > > >  union tdx_exit_reason {
> > > > > diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> > > > > index d049e0c72ed0c..275f5d75e9bf1 100644
> > > > > --- a/arch/x86/kvm/vmx/x86_ops.h
> > > > > +++ b/arch/x86/kvm/vmx/x86_ops.h
> > > > > @@ -187,6 +187,8 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
> > > > >  void tdx_flush_tlb(struct kvm_vcpu *vcpu);
> > > > >  int tdx_sept_tlb_remote_flush(struct kvm *kvm);
> > > > >  void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
> > > > > +
> > > > > +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
> > > > >  #else
> > > > >  static inline int tdx_init(void) { return 0; };
> > > > >  static inline int tdx_hardware_setup(struct kvm_x86_ops *x86_ops) { return -ENOSYS; }
> > > > > @@ -241,6 +243,9 @@ static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { ret
> > > > >  static inline void tdx_flush_tlb(struct kvm_vcpu *vcpu) {}
> > > > >  static inline int tdx_sept_tlb_remote_flush(struct kvm *kvm) { return 0; }
> > > > >  static inline void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
> > > > > +
> > > > > +static inline int tdx_vm_move_enc_context_from(struct kvm *kvm,
> > > > > +					       unsigned int source_fd) { return -EOPNOTSUPP; }
> > > > >  #endif
> > > > >
> > > > >  #if defined(CONFIG_INTEL_TDX_HOST) && defined(CONFIG_KVM_SMM)
> > > >
> >
  

Patch

diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 5b64fe5404958..9d5d0ac465bf6 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -979,6 +979,14 @@  static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
 	return tdx_vcpu_ioctl(vcpu, argp);
 }
 
+static int vt_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
+{
+	if (!is_td(kvm))
+		return -ENOTTY;
+
+	return tdx_vm_move_enc_context_from(kvm, source_fd);
+}
+
 #define VMX_REQUIRED_APICV_INHIBITS		       \
 (						       \
        BIT(APICV_INHIBIT_REASON_DISABLE)|	       \
@@ -1141,6 +1149,8 @@  struct kvm_x86_ops vt_x86_ops __initdata = {
 	.dev_mem_enc_ioctl = tdx_dev_ioctl,
 	.mem_enc_ioctl = vt_mem_enc_ioctl,
 	.vcpu_mem_enc_ioctl = vt_vcpu_mem_enc_ioctl,
+
+	.vm_move_enc_context_from = vt_move_enc_context_from,
 };
 
 struct kvm_x86_init_ops vt_init_ops __initdata = {
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 8af7e4e81c860..0999a6d827c99 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -2826,3 +2826,59 @@  int __init tdx_init(void)
 		INIT_LIST_HEAD(&per_cpu(associated_tdvcpus, cpu));
 	return 0;
 }
+
+static __always_inline bool tdx_guest(struct kvm *kvm)
+{
+	struct kvm_tdx *tdx_kvm = to_kvm_tdx(kvm);
+
+	return tdx_kvm->finalized;
+}
+
+static int tdx_migrate_from(struct kvm *dst, struct kvm *src)
+{
+	return -EINVAL;
+}
+
+int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
+{
+	struct kvm_tdx *dst_tdx = to_kvm_tdx(kvm);
+	struct file *src_kvm_file;
+	struct kvm_tdx *src_tdx;
+	struct kvm *src_kvm;
+	int ret;
+
+	src_kvm_file = fget(source_fd);
+	if (!file_is_kvm(src_kvm_file)) {
+		ret = -EBADF;
+		goto out_fput;
+	}
+	src_kvm = src_kvm_file->private_data;
+	src_tdx = to_kvm_tdx(src_kvm);
+
+	ret = pre_move_enc_context_from(kvm, src_kvm,
+					&dst_tdx->migration_in_progress,
+					&src_tdx->migration_in_progress);
+	if (ret)
+		goto out_fput;
+
+	if (tdx_guest(kvm) || !tdx_guest(src_kvm)) {
+		ret = -EINVAL;
+		goto out_post;
+	}
+
+	ret = tdx_migrate_from(kvm, src_kvm);
+	if (ret)
+		goto out_post;
+
+	kvm_vm_dead(src_kvm);
+	ret = 0;
+
+out_post:
+	post_move_enc_context_from(kvm, src_kvm,
+				 &dst_tdx->migration_in_progress,
+				 &src_tdx->migration_in_progress);
+out_fput:
+	if (src_kvm_file)
+		fput(src_kvm_file);
+	return ret;
+}
diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
index 71818c5001862..21b7e710be1fd 100644
--- a/arch/x86/kvm/vmx/tdx.h
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -24,6 +24,8 @@  struct kvm_tdx {
 	atomic_t tdh_mem_track;
 
 	u64 tsc_offset;
+
+	atomic_t migration_in_progress;
 };
 
 union tdx_exit_reason {
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index d049e0c72ed0c..275f5d75e9bf1 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -187,6 +187,8 @@  int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
 void tdx_flush_tlb(struct kvm_vcpu *vcpu);
 int tdx_sept_tlb_remote_flush(struct kvm *kvm);
 void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
+
+int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
 #else
 static inline int tdx_init(void) { return 0; };
 static inline int tdx_hardware_setup(struct kvm_x86_ops *x86_ops) { return -ENOSYS; }
@@ -241,6 +243,9 @@  static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { ret
 static inline void tdx_flush_tlb(struct kvm_vcpu *vcpu) {}
 static inline int tdx_sept_tlb_remote_flush(struct kvm *kvm) { return 0; }
 static inline void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
+
+static inline int tdx_vm_move_enc_context_from(struct kvm *kvm,
+					       unsigned int source_fd) { return -EOPNOTSUPP; }
 #endif
 
 #if defined(CONFIG_INTEL_TDX_HOST) && defined(CONFIG_KVM_SMM)