[v3,14/29] LoongArch: KVM: Implement vcpu load and vcpu put operations

Message ID 20230228070057.3687180-15-zhaotianrui@loongson.cn
State New
Series Add KVM LoongArch support

Commit Message

zhaotianrui Feb. 28, 2023, 7 a.m. UTC
  Implement LoongArch vcpu load and vcpu put operations: load guest csr
values into hardware on vcpu_load, and save them back into the vcpu
structure on vcpu_put.

Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
---
 arch/loongarch/kvm/vcpu.c | 192 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 192 insertions(+)
  

Comments

maobibo March 7, 2023, 2:03 a.m. UTC | #1
On 2023/2/28 15:00, Tianrui Zhao wrote:
> Implement LoongArch vcpu load and vcpu put operations: load guest csr
> values into hardware on vcpu_load, and save them back into the vcpu
> structure on vcpu_put.
> 
> Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
> ---
>  arch/loongarch/kvm/vcpu.c | 192 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 192 insertions(+)
> 
> diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
> index 615f68d082f8..14c89208936f 100644
> --- a/arch/loongarch/kvm/vcpu.c
> +++ b/arch/loongarch/kvm/vcpu.c
> @@ -771,6 +771,198 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
>  	}
>  }
>  
> +static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> +{
> +	struct kvm_context *context;
> +	struct loongarch_csrs *csr = vcpu->arch.csr;
> +	bool migrated, all;
> +
> +	/*
> +	 * Have we migrated to a different CPU?
> +	 * If so, any old guest TLB state may be stale.
> +	 */
> +	migrated = (vcpu->arch.last_sched_cpu != cpu);
> +
> +	/*
> +	 * Was this the last VCPU to run on this CPU?
> +	 * If not, any old guest state from this VCPU will have been clobbered.
> +	 */
> +	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
> +	all = migrated || (context->last_vcpu != vcpu);
> +	context->last_vcpu = vcpu;
> +
> +	/*
> +	 * Restore timer state regardless
> +	 */
> +	kvm_restore_timer(vcpu);
> +
> +	/* Control guest page CCA attribute */
> +	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
> +	/* Don't bother restoring registers multiple times unless necessary */
> +	if (!all)
> +		return 0;
> +
> +	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
> +	/*
> +	 * Restore guest CSR registers
> +	 */
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
> +	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
> +
> +	/* restore Root.Guestexcept from unused Guest guestexcept register */
> +	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
> +
> +	/*
> +	 * We should clear the linked-load bit to break interrupted atomics.
> +	 * This prevents an SC on the next VCPU from succeeding by matching
> +	 * an LL on the previous VCPU.
> +	 */
> +	if (vcpu->kvm->created_vcpus > 1)
> +		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
> +
> +	return 0;
> +}
> +
> +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> +{
> +	unsigned long flags;
> +
> +	local_irq_save(flags);
> +	vcpu->cpu = cpu;
> +	if (vcpu->arch.last_sched_cpu != cpu) {
> +		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
> +				vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
> +		/*
> +		 * Migrate the timer interrupt to the current CPU so that it
> +		 * always interrupts the guest and synchronously triggers a
> +		 * guest timer interrupt.
> +		 */
> +		kvm_migrate_count(vcpu);
> +	}
> +
> +	/* restore guest state to registers */
> +	_kvm_vcpu_load(vcpu, cpu);
> +	local_irq_restore(flags);
> +}
> +
> +static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
> +{
> +	struct loongarch_csrs *csr = vcpu->arch.csr;
> +
> +	kvm_lose_fpu(vcpu);
Hi Tianrui,

Can we add a KVM_LARCH_CSR bit in vcpu->arch.aux_inuse, similar to
KVM_LARCH_FPU? The bit would mean that the sw csr copy is consistent
with the hw csr registers.

Set the bit here in _kvm_vcpu_put and clear it when returning to the
guest. If the bit is already set, we need not copy from hw csr to sw
csr, and the SET_ONE_REG function would then have to update both the
sw and hw csr registers.


Regards
Bibo, Mao
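
A rough sketch of the suggested flag, for illustration only (the name
KVM_LARCH_CSR, its value, and the hook points are assumptions here,
modeled on how KVM_LARCH_FPU is handled in this series):

    /* a possible flag next to KVM_LARCH_FPU; the value is illustrative */
    #define KVM_LARCH_CSR	(0x1 << 1)

    static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
    {
        struct loongarch_csrs *csr = vcpu->arch.csr;

        kvm_lose_fpu(vcpu);

        /*
         * The sw csr copy already matches the hardware: the vcpu was
         * put before without re-entering the guest, so the copy below
         * can be skipped.
         */
        if (vcpu->arch.aux_inuse & KVM_LARCH_CSR)
            return 0;

        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
        /* ... save the remaining hw gcsr registers as above ... */

        kvm_save_timer(vcpu);
        csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();

        /* sw and hw csr are consistent from here on */
        vcpu->arch.aux_inuse |= KVM_LARCH_CSR;
        return 0;
    }

    /* and when entering the guest, hw csr may diverge again */
    vcpu->arch.aux_inuse &= ~KVM_LARCH_CSR;

The win is on ioctl-heavy paths where the vcpu is loaded and put
repeatedly without running the guest: the full gcsr save then happens
only once.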

> +
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGD);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
> +	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
> +
> +	kvm_save_timer(vcpu);
> +	/* save Root.Guestexcept in unused Guest guestexcept register */
> +	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
> +	return 0;
> +}
> +
> +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
> +{
> +	unsigned long flags;
> +	int cpu;
> +
> +	local_irq_save(flags);
> +	cpu = smp_processor_id();
> +	vcpu->arch.last_sched_cpu = cpu;
> +	vcpu->cpu = -1;
> +
> +	/* save guest state in registers */
> +	_kvm_vcpu_put(vcpu, cpu);
> +	local_irq_restore(flags);
> +}
> +
>  int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
>  {
>  	int r = -EINTR;
  
