[v21,01/29] LoongArch: KVM: Add kvm related header files

Message ID 20230915014949.1222777-2-zhaotianrui@loongson.cn
State New
Series Add KVM LoongArch support

Commit Message

zhaotianrui Sept. 15, 2023, 1:49 a.m. UTC
  Add LoongArch KVM related header files, including kvm.h,
kvm_host.h, kvm_types.h. All of those are about LoongArch
virtualization features and kvm interfaces.

Reviewed-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
---
 arch/loongarch/include/asm/kvm_host.h  | 245 +++++++++++++++++++++++++
 arch/loongarch/include/asm/kvm_types.h |  11 ++
 arch/loongarch/include/uapi/asm/kvm.h  | 108 +++++++++++
 include/uapi/linux/kvm.h               |   9 +
 4 files changed, 373 insertions(+)
 create mode 100644 arch/loongarch/include/asm/kvm_host.h
 create mode 100644 arch/loongarch/include/asm/kvm_types.h
 create mode 100644 arch/loongarch/include/uapi/asm/kvm.h
  

Comments

Huacai Chen Sept. 16, 2023, 8:48 a.m. UTC | #1
Hi, Tianrui,

On Fri, Sep 15, 2023 at 9:50 AM Tianrui Zhao <zhaotianrui@loongson.cn> wrote:
>
> Add LoongArch KVM related header files, including kvm.h,
> kvm_host.h, kvm_types.h. All of those are about LoongArch
> virtualization features and kvm interfaces.
>
> Reviewed-by: Bibo Mao <maobibo@loongson.cn>
> Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
> ---
>  arch/loongarch/include/asm/kvm_host.h  | 245 +++++++++++++++++++++++++
>  arch/loongarch/include/asm/kvm_types.h |  11 ++
>  arch/loongarch/include/uapi/asm/kvm.h  | 108 +++++++++++
>  include/uapi/linux/kvm.h               |   9 +
>  4 files changed, 373 insertions(+)
>  create mode 100644 arch/loongarch/include/asm/kvm_host.h
>  create mode 100644 arch/loongarch/include/asm/kvm_types.h
>  create mode 100644 arch/loongarch/include/uapi/asm/kvm.h
>
> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
> new file mode 100644
> index 0000000000..00e0c1876b
> --- /dev/null
> +++ b/arch/loongarch/include/asm/kvm_host.h
> @@ -0,0 +1,245 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> + */
> +
> +#ifndef __ASM_LOONGARCH_KVM_HOST_H__
> +#define __ASM_LOONGARCH_KVM_HOST_H__
> +
> +#include <linux/cpumask.h>
> +#include <linux/mutex.h>
> +#include <linux/hrtimer.h>
> +#include <linux/interrupt.h>
> +#include <linux/types.h>
> +#include <linux/kvm.h>
> +#include <linux/kvm_types.h>
> +#include <linux/threads.h>
> +#include <linux/spinlock.h>
> +
> +#include <asm/inst.h>
> +#include <asm/kvm_mmu.h>
> +#include <asm/loongarch.h>
> +
> +/* Loongarch KVM register ids */
> +#define KVM_GET_IOC_CSRIDX(id)         ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
> +#define KVM_GET_IOC_CPUCFG_IDX(id)     ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
> +
> +#define KVM_MAX_VCPUS                  256
> +#define KVM_MAX_CPUCFG_REGS            21
> +/* memory slots that does not exposed to userspace */
> +#define KVM_PRIVATE_MEM_SLOTS          0
> +
> +#define KVM_HALT_POLL_NS_DEFAULT       500000
> +
> +struct kvm_vm_stat {
> +       struct kvm_vm_stat_generic generic;
> +       u64 pages;
> +       u64 hugepages;
> +};
> +
> +struct kvm_vcpu_stat {
> +       struct kvm_vcpu_stat_generic generic;
> +       u64 idle_exits;
> +       u64 signal_exits;
> +       u64 int_exits;
> +       u64 cpucfg_exits;
> +};
> +
> +struct kvm_arch_memory_slot {
> +};
> +
> +struct kvm_context {
> +       unsigned long vpid_cache;
> +       struct kvm_vcpu *last_vcpu;
> +};
> +
> +struct kvm_world_switch {
> +       int (*guest_eentry)(void);
> +       int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
> +       unsigned long page_order;
> +};
> +
> +#define MAX_PGTABLE_LEVELS     4
> +struct kvm_arch {
> +       /* Guest physical mm */
> +       kvm_pte_t *pgd;
> +       unsigned long gpa_size;
> +       unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
> +       unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
> +       unsigned int  root_level;
> +
> +       s64 time_offset;
> +       struct kvm_context __percpu *vmcs;
> +};
> +
> +#define CSR_MAX_NUMS           0x800
> +
> +struct loongarch_csrs {
> +       unsigned long csrs[CSR_MAX_NUMS];
> +};
> +
> +/* Resume Flags */
> +#define RESUME_HOST            0
> +#define RESUME_GUEST           1
> +
> +enum emulation_result {
> +       EMULATE_DONE,           /* no further processing */
> +       EMULATE_DO_MMIO,        /* kvm_run filled with MMIO request */
> +       EMULATE_FAIL,           /* can't emulate this instruction */
> +       EMULATE_EXCEPT,         /* A guest exception has been generated */
> +       EMULATE_DO_IOCSR,       /* handle IOCSR request */
> +};
> +
> +#define KVM_LARCH_FPU          (0x1 << 0)
> +#define KVM_LARCH_CSR          (0x1 << 1)
> +#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
> +
> +struct kvm_vcpu_arch {
> +       /*
> +        * Switch pointer-to-function type to unsigned long
> +        * for loading the value into register directly.
> +        */
> +       unsigned long host_eentry;
> +       unsigned long guest_eentry;
> +
> +       /* Pointers stored here for easy accessing from assembly code */
> +       int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
> +
> +       /* Host registers preserved across guest mode execution */
> +       unsigned long host_sp;
> +       unsigned long host_tp;
> +       unsigned long host_pgd;
> +
> +       /* Host CSRs are used when handling exits from guest */
> +       unsigned long badi;
> +       unsigned long badv;
> +       unsigned long host_ecfg;
> +       unsigned long host_estat;
> +       unsigned long host_percpu;
> +
> +       /* GPRs */
> +       unsigned long gprs[32];
> +       unsigned long pc;
> +
> +       /* Which auxiliary state is loaded (KVM_LARCH_*) */
> +       unsigned int aux_inuse;
> +       /* FPU state */
> +       struct loongarch_fpu fpu FPU_ALIGN;
> +
> +       /* CSR state */
> +       struct loongarch_csrs *csr;
> +
> +       /* GPR used as IO source/target */
> +       u32 io_gpr;
> +
> +       struct hrtimer swtimer;
> +       /* KVM register to control count timer */
> +       u32 count_ctl;
> +
> +       /* Bitmask of intr that are pending */
> +       unsigned long irq_pending;
> +       /* Bitmask of pending intr to be cleared */
> +       unsigned long irq_clear;
> +
> +       /* Bitmask of exceptions that are pending */
> +       unsigned long exception_pending;
> +       unsigned int  subcode;
> +
> +       /* Cache for pages needed inside spinlock regions */
> +       struct kvm_mmu_memory_cache mmu_page_cache;
> +
> +       /* vcpu's vpid */
> +       u64 vpid;
> +
> +       /* Frequency of stable timer in Hz */
> +       u64 timer_mhz;
> +       ktime_t expire;
> +
> +       u64 core_ext_ioisr[4];
> +
> +       /* Last CPU the vCPU state was loaded on */
> +       int last_sched_cpu;
> +       /* mp state */
> +       struct kvm_mp_state mp_state;
> +       /* cpucfg */
> +       u32 cpucfg[KVM_MAX_CPUCFG_REGS];
> +};
> +
> +static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
> +{
> +       return csr->csrs[reg];
> +}
> +
> +static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
> +{
> +       csr->csrs[reg] = val;
> +}
> +
> +/* Helpers */
> +static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
> +{
> +       return cpu_has_fpu;
> +}
> +
> +void kvm_init_fault(void);
> +
> +/* Debug: dump vcpu state */
> +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
> +
> +/* MMU handling */
> +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
> +void kvm_flush_tlb_all(void);
> +
> +#define KVM_ARCH_WANT_MMU_NOTIFIER
> +int kvm_unmap_hva_range(struct kvm *kvm,
> +                       unsigned long start, unsigned long end, bool blockable);
> +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
> +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
> +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
> +
> +static inline void update_pc(struct kvm_vcpu_arch *arch)
> +{
> +       arch->pc += 4;
> +}
> +
> +/**
> + * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
> + * @vcpu:      Virtual CPU.
> + *
> + * Returns:    Whether the TLBL exception was likely due to an instruction
> + *             fetch fault rather than a data load fault.
> + */
> +static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
> +{
> +       return arch->pc == arch->badv;
> +}
> +
> +/* Misc */
> +static inline void kvm_arch_hardware_unsetup(void) {}
> +static inline void kvm_arch_sync_events(struct kvm *kvm) {}
> +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
> +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
> +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
> +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
> +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
> +static inline void kvm_arch_free_memslot(struct kvm *kvm,
> +                                  struct kvm_memory_slot *slot) {}
> +void kvm_check_vpid(struct kvm_vcpu *vcpu);
> +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
> +int kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
> +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
> +                                       const struct kvm_memory_slot *memslot);
> +void kvm_init_vmcs(struct kvm *kvm);
> +void kvm_vector_entry(void);
> +int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
> +extern const unsigned long kvm_vector_size;
> +extern const unsigned long kvm_enter_guest_size;
> +extern unsigned long vpid_mask;
> +extern struct kvm_world_switch *kvm_loongarch_ops;
> +
> +#define SW_GCSR                (1 << 0)
> +#define HW_GCSR                (1 << 1)
> +#define INVALID_GCSR   (1 << 2)
> +int get_gcsr_flag(int csr);
> +extern void set_hw_gcsr(int csr_id, unsigned long val);
> +#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */
> diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h
> new file mode 100644
> index 0000000000..2fe1d4bdff
> --- /dev/null
> +++ b/arch/loongarch/include/asm/kvm_types.h
> @@ -0,0 +1,11 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> + */
> +
> +#ifndef _ASM_LOONGARCH_KVM_TYPES_H
> +#define _ASM_LOONGARCH_KVM_TYPES_H
> +
> +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE      40
> +
> +#endif /* _ASM_LOONGARCH_KVM_TYPES_H */
> diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
> new file mode 100644
> index 0000000000..fafda487d6
> --- /dev/null
> +++ b/arch/loongarch/include/uapi/asm/kvm.h
> @@ -0,0 +1,108 @@
> +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
> +/*
> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> + */
> +
> +#ifndef __UAPI_ASM_LOONGARCH_KVM_H
> +#define __UAPI_ASM_LOONGARCH_KVM_H
> +
> +#include <linux/types.h>
> +
> +/*
> + * KVM Loongarch specific structures and definitions.
> + *
> + * Some parts derived from the x86 version of this file.
> + */
> +
> +#define __KVM_HAVE_READONLY_MEM
> +
> +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
> +#define KVM_DIRTY_LOG_PAGE_OFFSET      64
> +
> +/*
> + * for KVM_GET_REGS and KVM_SET_REGS
> + */
> +struct kvm_regs {
> +       /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
> +       __u64 gpr[32];
> +       __u64 pc;
> +};
> +
> +/*
> + * for KVM_GET_FPU and KVM_SET_FPU
> + */
> +struct kvm_fpu {
> +       __u32 fcsr;
> +       __u64 fcc;    /* 8x8 */
> +       struct kvm_fpureg {
> +               __u64 val64[4];
> +       } fpr[32];
> +};
> +
> +/*
> + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
> + * registers.  The id field is broken down as follows:
> + *
> + *  bits[63..52] - As per linux/kvm.h
> + *  bits[51..32] - Must be zero.
> + *  bits[31..16] - Register set.
> + *
> + * Register set = 0: GP registers from kvm_regs (see definitions below).
> + *
> + * Register set = 1: CSR registers.
> + *
> + * Register set = 2: KVM specific registers (see definitions below).
> + *
> + * Register set = 3: FPU / SIMD registers (see definitions below).
> + *
> + * Other sets registers may be added in the future.  Each set would
> + * have its own identifier in bits[31..16].
> + */
> +
> +#define KVM_REG_LOONGARCH_GPR          (KVM_REG_LOONGARCH | 0x00000ULL)
> +#define KVM_REG_LOONGARCH_CSR          (KVM_REG_LOONGARCH | 0x10000ULL)
> +#define KVM_REG_LOONGARCH_KVM          (KVM_REG_LOONGARCH | 0x20000ULL)
> +#define KVM_REG_LOONGARCH_FPU          (KVM_REG_LOONGARCH | 0x30000ULL)
How about rename to KVM_REG_LOONGARCH_FPSIMD?

Huacai

> +#define KVM_REG_LOONGARCH_CPUCFG       (KVM_REG_LOONGARCH | 0x40000ULL)
> +#define KVM_REG_LOONGARCH_MASK         (KVM_REG_LOONGARCH | 0x70000ULL)
> +#define KVM_CSR_IDX_MASK               0x7fff
> +#define KVM_CPUCFG_IDX_MASK            0x7fff
> +
> +/*
> + * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
> + */
> +
> +#define KVM_REG_LOONGARCH_COUNTER      (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
> +#define KVM_REG_LOONGARCH_VCPU_RESET   (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)
> +
> +#define LOONGARCH_REG_SHIFT            3
> +#define LOONGARCH_REG_64(TYPE, REG)    (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
> +#define KVM_IOC_CSRID(REG)             LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
> +#define KVM_IOC_CPUCFG(REG)            LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
> +
> +struct kvm_debug_exit_arch {
> +};
> +
> +/* for KVM_SET_GUEST_DEBUG */
> +struct kvm_guest_debug_arch {
> +};
> +
> +/* definition of registers in kvm_run */
> +struct kvm_sync_regs {
> +};
> +
> +/* dummy definition */
> +struct kvm_sregs {
> +};
> +
> +struct kvm_iocsr_entry {
> +       __u32 addr;
> +       __u32 pad;
> +       __u64 data;
> +};
> +
> +#define KVM_NR_IRQCHIPS                1
> +#define KVM_IRQCHIP_NUM_PINS   64
> +#define KVM_MAX_CORES          256
> +
> +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
> index 13065dd961..863f84619a 100644
> --- a/include/uapi/linux/kvm.h
> +++ b/include/uapi/linux/kvm.h
> @@ -264,6 +264,7 @@ struct kvm_xen_exit {
>  #define KVM_EXIT_RISCV_SBI        35
>  #define KVM_EXIT_RISCV_CSR        36
>  #define KVM_EXIT_NOTIFY           37
> +#define KVM_EXIT_LOONGARCH_IOCSR  38
>
>  /* For KVM_EXIT_INTERNAL_ERROR */
>  /* Emulate instruction failed. */
> @@ -336,6 +337,13 @@ struct kvm_run {
>                         __u32 len;
>                         __u8  is_write;
>                 } mmio;
> +               /* KVM_EXIT_LOONGARCH_IOCSR */
> +               struct {
> +                       __u64 phys_addr;
> +                       __u8  data[8];
> +                       __u32 len;
> +                       __u8  is_write;
> +               } iocsr_io;
>                 /* KVM_EXIT_HYPERCALL */
>                 struct {
>                         __u64 nr;
> @@ -1362,6 +1370,7 @@ struct kvm_dirty_tlb {
>  #define KVM_REG_ARM64          0x6000000000000000ULL
>  #define KVM_REG_MIPS           0x7000000000000000ULL
>  #define KVM_REG_RISCV          0x8000000000000000ULL
> +#define KVM_REG_LOONGARCH      0x9000000000000000ULL
>
>  #define KVM_REG_SIZE_SHIFT     52
>  #define KVM_REG_SIZE_MASK      0x00f0000000000000ULL
> --
> 2.39.1
>
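
For context on the ONE_REG id encoding described in the quoted uapi header, below is a minimal
userspace sketch of reading and writing a guest CSR through the KVM_IOC_CSRID() id from this
patch. It is only a sketch: it assumes the headers from this series are installed, vcpu_fd and
csr_index are placeholders, and error handling is omitted. struct kvm_one_reg and the
KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls come from the generic linux/kvm.h UAPI.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* also pulls in the new asm/kvm.h with KVM_IOC_CSRID() */

/* Read one guest CSR via KVM_GET_ONE_REG. */
static uint64_t get_guest_csr(int vcpu_fd, uint64_t csr_index)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_IOC_CSRID(csr_index),
		.addr = (uint64_t)(uintptr_t)&val,
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	return val;
}

/* Write one guest CSR via KVM_SET_ONE_REG. */
static void set_guest_csr(int vcpu_fd, uint64_t csr_index, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_IOC_CSRID(csr_index),
		.addr = (uint64_t)(uintptr_t)&val,
	};

	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

The other register sets described in the header (GPR, KVM, CPUCFG) follow the same pattern;
only the set identifier in bits[31..16] of the id changes.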
  
Huacai Chen Sept. 17, 2023, 2:22 p.m. UTC | #2
Hi, Tianrui,

On Fri, Sep 15, 2023 at 9:50 AM Tianrui Zhao <zhaotianrui@loongson.cn> wrote:
>
> [...]
> +/*
> + * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
> + */
> +
> +#define KVM_REG_LOONGARCH_COUNTER      (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
> +#define KVM_REG_LOONGARCH_VCPU_RESET   (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)
Why begin with 3? 0, 1, 2 reserved for what?

Huacai
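
For context on the question above: both ids are meant to be driven through the same ONE_REG
path as the CSRs. A rough, hypothetical sketch follows; what value the counter id carries and
what a write to KVM_REG_LOONGARCH_VCPU_RESET actually does are defined by later patches in the
series, not by this header-only patch, so the value written below is just a placeholder.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical: read back the KVM-specific counter register (e.g. for migration). */
static uint64_t get_guest_counter(int vcpu_fd)
{
	uint64_t counter = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_LOONGARCH_COUNTER,
		.addr = (uint64_t)(uintptr_t)&counter,
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	return counter;
}

/* Hypothetical: ask KVM to reset the vCPU; the value written is a placeholder. */
static void request_vcpu_reset(int vcpu_fd)
{
	uint64_t dummy = 1;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_LOONGARCH_VCPU_RESET,
		.addr = (uint64_t)(uintptr_t)&dummy,
	};

	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}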

  
zhaotianrui Sept. 18, 2023, 1:32 a.m. UTC | #3
On 2023/9/16 4:48 PM, Huacai Chen wrote:
> Hi, Tianrui,
>
> On Fri, Sep 15, 2023 at 9:50 AM Tianrui Zhao <zhaotianrui@loongson.cn> wrote:
>> [...]
>> +#define KVM_REG_LOONGARCH_GPR          (KVM_REG_LOONGARCH | 0x00000ULL)
>> +#define KVM_REG_LOONGARCH_CSR          (KVM_REG_LOONGARCH | 0x10000ULL)
>> +#define KVM_REG_LOONGARCH_KVM          (KVM_REG_LOONGARCH | 0x20000ULL)
>> +#define KVM_REG_LOONGARCH_FPU          (KVM_REG_LOONGARCH | 0x30000ULL)
> How about rename to KVM_REG_LOONGARCH_FPSIMD?
>
> Huacai
It would break the UAPI used by user-space software and may cause
compatibility issues, so I think it is better to keep the original name.

Thanks
Tianrui Zhao
  
Huacai Chen Sept. 18, 2023, 1:36 a.m. UTC | #4
Hi, Tianrui,

On Mon, Sep 18, 2023 at 9:32 AM zhaotianrui <zhaotianrui@loongson.cn> wrote:
>
>
> On 2023/9/16 4:48 PM, Huacai Chen wrote:
> > Hi, Tianrui,
> >
> > On Fri, Sep 15, 2023 at 9:50 AM Tianrui Zhao <zhaotianrui@loongson.cn> wrote:
> >> Add LoongArch KVM related header files, including kvm.h,
> >> kvm_host.h, kvm_types.h. All of those are about LoongArch
> >> virtualization features and kvm interfaces.
> >>
> >> Reviewed-by: Bibo Mao <maobibo@loongson.cn>
> >> Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
> >> ---
> >>   arch/loongarch/include/asm/kvm_host.h  | 245 +++++++++++++++++++++++++
> >>   arch/loongarch/include/asm/kvm_types.h |  11 ++
> >>   arch/loongarch/include/uapi/asm/kvm.h  | 108 +++++++++++
> >>   include/uapi/linux/kvm.h               |   9 +
> >>   4 files changed, 373 insertions(+)
> >>   create mode 100644 arch/loongarch/include/asm/kvm_host.h
> >>   create mode 100644 arch/loongarch/include/asm/kvm_types.h
> >>   create mode 100644 arch/loongarch/include/uapi/asm/kvm.h
> >>
> >> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
> >> new file mode 100644
> >> index 0000000000..00e0c1876b
> >> --- /dev/null
> >> +++ b/arch/loongarch/include/asm/kvm_host.h
> >> @@ -0,0 +1,245 @@
> >> +/* SPDX-License-Identifier: GPL-2.0 */
> >> +/*
> >> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> >> + */
> >> +
> >> +#ifndef __ASM_LOONGARCH_KVM_HOST_H__
> >> +#define __ASM_LOONGARCH_KVM_HOST_H__
> >> +
> >> +#include <linux/cpumask.h>
> >> +#include <linux/mutex.h>
> >> +#include <linux/hrtimer.h>
> >> +#include <linux/interrupt.h>
> >> +#include <linux/types.h>
> >> +#include <linux/kvm.h>
> >> +#include <linux/kvm_types.h>
> >> +#include <linux/threads.h>
> >> +#include <linux/spinlock.h>
> >> +
> >> +#include <asm/inst.h>
> >> +#include <asm/kvm_mmu.h>
> >> +#include <asm/loongarch.h>
> >> +
> >> +/* Loongarch KVM register ids */
> >> +#define KVM_GET_IOC_CSRIDX(id)         ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
> >> +#define KVM_GET_IOC_CPUCFG_IDX(id)     ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
> >> +
> >> +#define KVM_MAX_VCPUS                  256
> >> +#define KVM_MAX_CPUCFG_REGS            21
> >> +/* memory slots that does not exposed to userspace */
> >> +#define KVM_PRIVATE_MEM_SLOTS          0
> >> +
> >> +#define KVM_HALT_POLL_NS_DEFAULT       500000
> >> +
> >> +struct kvm_vm_stat {
> >> +       struct kvm_vm_stat_generic generic;
> >> +       u64 pages;
> >> +       u64 hugepages;
> >> +};
> >> +
> >> +struct kvm_vcpu_stat {
> >> +       struct kvm_vcpu_stat_generic generic;
> >> +       u64 idle_exits;
> >> +       u64 signal_exits;
> >> +       u64 int_exits;
> >> +       u64 cpucfg_exits;
> >> +};
> >> +
> >> +struct kvm_arch_memory_slot {
> >> +};
> >> +
> >> +struct kvm_context {
> >> +       unsigned long vpid_cache;
> >> +       struct kvm_vcpu *last_vcpu;
> >> +};
> >> +
> >> +struct kvm_world_switch {
> >> +       int (*guest_eentry)(void);
> >> +       int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
> >> +       unsigned long page_order;
> >> +};
> >> +
> >> +#define MAX_PGTABLE_LEVELS     4
> >> +struct kvm_arch {
> >> +       /* Guest physical mm */
> >> +       kvm_pte_t *pgd;
> >> +       unsigned long gpa_size;
> >> +       unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
> >> +       unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
> >> +       unsigned int  root_level;
> >> +
> >> +       s64 time_offset;
> >> +       struct kvm_context __percpu *vmcs;
> >> +};
> >> +
> >> +#define CSR_MAX_NUMS           0x800
> >> +
> >> +struct loongarch_csrs {
> >> +       unsigned long csrs[CSR_MAX_NUMS];
> >> +};
> >> +
> >> +/* Resume Flags */
> >> +#define RESUME_HOST            0
> >> +#define RESUME_GUEST           1
> >> +
> >> +enum emulation_result {
> >> +       EMULATE_DONE,           /* no further processing */
> >> +       EMULATE_DO_MMIO,        /* kvm_run filled with MMIO request */
> >> +       EMULATE_FAIL,           /* can't emulate this instruction */
> >> +       EMULATE_EXCEPT,         /* A guest exception has been generated */
> >> +       EMULATE_DO_IOCSR,       /* handle IOCSR request */
> >> +};
> >> +
> >> +#define KVM_LARCH_FPU          (0x1 << 0)
> >> +#define KVM_LARCH_CSR          (0x1 << 1)
> >> +#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
> >> +
> >> +struct kvm_vcpu_arch {
> >> +       /*
> >> +        * Switch pointer-to-function type to unsigned long
> >> +        * for loading the value into register directly.
> >> +        */
> >> +       unsigned long host_eentry;
> >> +       unsigned long guest_eentry;
> >> +
> >> +       /* Pointers stored here for easy accessing from assembly code */
> >> +       int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
> >> +
> >> +       /* Host registers preserved across guest mode execution */
> >> +       unsigned long host_sp;
> >> +       unsigned long host_tp;
> >> +       unsigned long host_pgd;
> >> +
> >> +       /* Host CSRs are used when handling exits from guest */
> >> +       unsigned long badi;
> >> +       unsigned long badv;
> >> +       unsigned long host_ecfg;
> >> +       unsigned long host_estat;
> >> +       unsigned long host_percpu;
> >> +
> >> +       /* GPRs */
> >> +       unsigned long gprs[32];
> >> +       unsigned long pc;
> >> +
> >> +       /* Which auxiliary state is loaded (KVM_LARCH_*) */
> >> +       unsigned int aux_inuse;
> >> +       /* FPU state */
> >> +       struct loongarch_fpu fpu FPU_ALIGN;
> >> +
> >> +       /* CSR state */
> >> +       struct loongarch_csrs *csr;
> >> +
> >> +       /* GPR used as IO source/target */
> >> +       u32 io_gpr;
> >> +
> >> +       struct hrtimer swtimer;
> >> +       /* KVM register to control count timer */
> >> +       u32 count_ctl;
> >> +
> >> +       /* Bitmask of intr that are pending */
> >> +       unsigned long irq_pending;
> >> +       /* Bitmask of pending intr to be cleared */
> >> +       unsigned long irq_clear;
> >> +
> >> +       /* Bitmask of exceptions that are pending */
> >> +       unsigned long exception_pending;
> >> +       unsigned int  subcode;
> >> +
> >> +       /* Cache for pages needed inside spinlock regions */
> >> +       struct kvm_mmu_memory_cache mmu_page_cache;
> >> +
> >> +       /* vcpu's vpid */
> >> +       u64 vpid;
> >> +
> >> +       /* Frequency of stable timer in Hz */
> >> +       u64 timer_mhz;
> >> +       ktime_t expire;
> >> +
> >> +       u64 core_ext_ioisr[4];
> >> +
> >> +       /* Last CPU the vCPU state was loaded on */
> >> +       int last_sched_cpu;
> >> +       /* mp state */
> >> +       struct kvm_mp_state mp_state;
> >> +       /* cpucfg */
> >> +       u32 cpucfg[KVM_MAX_CPUCFG_REGS];
> >> +};
> >> +
> >> +static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
> >> +{
> >> +       return csr->csrs[reg];
> >> +}
> >> +
> >> +static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
> >> +{
> >> +       csr->csrs[reg] = val;
> >> +}
> >> +
> >> +/* Helpers */
> >> +static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
> >> +{
> >> +       return cpu_has_fpu;
> >> +}
> >> +
> >> +void kvm_init_fault(void);
> >> +
> >> +/* Debug: dump vcpu state */
> >> +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
> >> +
> >> +/* MMU handling */
> >> +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
> >> +void kvm_flush_tlb_all(void);
> >> +
> >> +#define KVM_ARCH_WANT_MMU_NOTIFIER
> >> +int kvm_unmap_hva_range(struct kvm *kvm,
> >> +                       unsigned long start, unsigned long end, bool blockable);
> >> +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
> >> +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
> >> +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
> >> +
> >> +static inline void update_pc(struct kvm_vcpu_arch *arch)
> >> +{
> >> +       arch->pc += 4;
> >> +}
> >> +
> >> +/**
> >> + * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
> >> + * @vcpu:      Virtual CPU.
> >> + *
> >> + * Returns:    Whether the TLBL exception was likely due to an instruction
> >> + *             fetch fault rather than a data load fault.
> >> + */
> >> +static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
> >> +{
> >> +       return arch->pc == arch->badv;
> >> +}
> >> +
> >> +/* Misc */
> >> +static inline void kvm_arch_hardware_unsetup(void) {}
> >> +static inline void kvm_arch_sync_events(struct kvm *kvm) {}
> >> +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
> >> +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
> >> +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
> >> +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
> >> +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
> >> +static inline void kvm_arch_free_memslot(struct kvm *kvm,
> >> +                                  struct kvm_memory_slot *slot) {}
> >> +void kvm_check_vpid(struct kvm_vcpu *vcpu);
> >> +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
> >> +int kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
> >> +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
> >> +                                       const struct kvm_memory_slot *memslot);
> >> +void kvm_init_vmcs(struct kvm *kvm);
> >> +void kvm_vector_entry(void);
> >> +int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
> >> +extern const unsigned long kvm_vector_size;
> >> +extern const unsigned long kvm_enter_guest_size;
> >> +extern unsigned long vpid_mask;
> >> +extern struct kvm_world_switch *kvm_loongarch_ops;
> >> +
> >> +#define SW_GCSR                (1 << 0)
> >> +#define HW_GCSR                (1 << 1)
> >> +#define INVALID_GCSR   (1 << 2)
> >> +int get_gcsr_flag(int csr);
> >> +extern void set_hw_gcsr(int csr_id, unsigned long val);
> >> +#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */
> >> diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h
> >> new file mode 100644
> >> index 0000000000..2fe1d4bdff
> >> --- /dev/null
> >> +++ b/arch/loongarch/include/asm/kvm_types.h
> >> @@ -0,0 +1,11 @@
> >> +/* SPDX-License-Identifier: GPL-2.0 */
> >> +/*
> >> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> >> + */
> >> +
> >> +#ifndef _ASM_LOONGARCH_KVM_TYPES_H
> >> +#define _ASM_LOONGARCH_KVM_TYPES_H
> >> +
> >> +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE      40
> >> +
> >> +#endif /* _ASM_LOONGARCH_KVM_TYPES_H */
> >> diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
> >> new file mode 100644
> >> index 0000000000..fafda487d6
> >> --- /dev/null
> >> +++ b/arch/loongarch/include/uapi/asm/kvm.h
> >> @@ -0,0 +1,108 @@
> >> +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
> >> +/*
> >> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> >> + */
> >> +
> >> +#ifndef __UAPI_ASM_LOONGARCH_KVM_H
> >> +#define __UAPI_ASM_LOONGARCH_KVM_H
> >> +
> >> +#include <linux/types.h>
> >> +
> >> +/*
> >> + * KVM Loongarch specific structures and definitions.
> >> + *
> >> + * Some parts derived from the x86 version of this file.
> >> + */
> >> +
> >> +#define __KVM_HAVE_READONLY_MEM
> >> +
> >> +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
> >> +#define KVM_DIRTY_LOG_PAGE_OFFSET      64
> >> +
> >> +/*
> >> + * for KVM_GET_REGS and KVM_SET_REGS
> >> + */
> >> +struct kvm_regs {
> >> +       /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
> >> +       __u64 gpr[32];
> >> +       __u64 pc;
> >> +};
> >> +
> >> +/*
> >> + * for KVM_GET_FPU and KVM_SET_FPU
> >> + */
> >> +struct kvm_fpu {
> >> +       __u32 fcsr;
> >> +       __u64 fcc;    /* 8x8 */
> >> +       struct kvm_fpureg {
> >> +               __u64 val64[4];
> >> +       } fpr[32];
> >> +};
> >> +
> >> +/*
> >> + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
> >> + * registers.  The id field is broken down as follows:
> >> + *
> >> + *  bits[63..52] - As per linux/kvm.h
> >> + *  bits[51..32] - Must be zero.
> >> + *  bits[31..16] - Register set.
> >> + *
> >> + * Register set = 0: GP registers from kvm_regs (see definitions below).
> >> + *
> >> + * Register set = 1: CSR registers.
> >> + *
> >> + * Register set = 2: KVM specific registers (see definitions below).
> >> + *
> >> + * Register set = 3: FPU / SIMD registers (see definitions below).
> >> + *
> >> + * Other register sets may be added in the future.  Each set would
> >> + * have its own identifier in bits[31..16].
> >> + */
> >> +
> >> +#define KVM_REG_LOONGARCH_GPR          (KVM_REG_LOONGARCH | 0x00000ULL)
> >> +#define KVM_REG_LOONGARCH_CSR          (KVM_REG_LOONGARCH | 0x10000ULL)
> >> +#define KVM_REG_LOONGARCH_KVM          (KVM_REG_LOONGARCH | 0x20000ULL)
> >> +#define KVM_REG_LOONGARCH_FPU          (KVM_REG_LOONGARCH | 0x30000ULL)
> > How about rename to KVM_REG_LOONGARCH_FPSIMD?
> >
> > Huacai
> It would break the UAPI used by user-space software and may cause
> compatibility issues, so I think it is better to keep the original name.
In your comment above it is not only FPU but FPU & SIMD, and this code
hasn't been merged upstream yet, so how can it break the UAPI?

Huacai
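
For readers following the register id layout documented in the comment above,
here is a minimal userspace sketch (not part of this series) of reading one
guest CSR through KVM_GET_ONE_REG. It assumes the usual /dev/kvm setup with an
already-created vcpu fd; KVM_IOC_CSRID() comes from the new uapi header, and
the CSR index passed in is whatever the caller chooses.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: compose a ONE_REG id for a CSR and fetch its value. */
static int read_guest_csr(int vcpu_fd, uint64_t csr_idx, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_IOC_CSRID(csr_idx),      /* set = CSR, size = U64, index */
		.addr = (uint64_t)(uintptr_t)val,    /* userspace buffer for the value */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}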

>
> Thanks
> Tianrui Zhao
> >
> >> +#define KVM_REG_LOONGARCH_CPUCFG       (KVM_REG_LOONGARCH | 0x40000ULL)
> >> +#define KVM_REG_LOONGARCH_MASK         (KVM_REG_LOONGARCH | 0x70000ULL)
> >> +#define KVM_CSR_IDX_MASK               0x7fff
> >> +#define KVM_CPUCFG_IDX_MASK            0x7fff
> >> +
> >> +/*
> >> + * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
> >> + */
> >> +
> >> +#define KVM_REG_LOONGARCH_COUNTER      (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
> >> +#define KVM_REG_LOONGARCH_VCPU_RESET   (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)
> >> +
> >> +#define LOONGARCH_REG_SHIFT            3
> >> +#define LOONGARCH_REG_64(TYPE, REG)    (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
> >> +#define KVM_IOC_CSRID(REG)             LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
> >> +#define KVM_IOC_CPUCFG(REG)            LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
> >> +
> >> +struct kvm_debug_exit_arch {
> >> +};
> >> +
> >> +/* for KVM_SET_GUEST_DEBUG */
> >> +struct kvm_guest_debug_arch {
> >> +};
> >> +
> >> +/* definition of registers in kvm_run */
> >> +struct kvm_sync_regs {
> >> +};
> >> +
> >> +/* dummy definition */
> >> +struct kvm_sregs {
> >> +};
> >> +
> >> +struct kvm_iocsr_entry {
> >> +       __u32 addr;
> >> +       __u32 pad;
> >> +       __u64 data;
> >> +};
> >> +
> >> +#define KVM_NR_IRQCHIPS                1
> >> +#define KVM_IRQCHIP_NUM_PINS   64
> >> +#define KVM_MAX_CORES          256
> >> +
> >> +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
> >> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
> >> index 13065dd961..863f84619a 100644
> >> --- a/include/uapi/linux/kvm.h
> >> +++ b/include/uapi/linux/kvm.h
> >> @@ -264,6 +264,7 @@ struct kvm_xen_exit {
> >>   #define KVM_EXIT_RISCV_SBI        35
> >>   #define KVM_EXIT_RISCV_CSR        36
> >>   #define KVM_EXIT_NOTIFY           37
> >> +#define KVM_EXIT_LOONGARCH_IOCSR  38
> >>
> >>   /* For KVM_EXIT_INTERNAL_ERROR */
> >>   /* Emulate instruction failed. */
> >> @@ -336,6 +337,13 @@ struct kvm_run {
> >>                          __u32 len;
> >>                          __u8  is_write;
> >>                  } mmio;
> >> +               /* KVM_EXIT_LOONGARCH_IOCSR */
> >> +               struct {
> >> +                       __u64 phys_addr;
> >> +                       __u8  data[8];
> >> +                       __u32 len;
> >> +                       __u8  is_write;
> >> +               } iocsr_io;
> >>                  /* KVM_EXIT_HYPERCALL */
> >>                  struct {
> >>                          __u64 nr;
> >> @@ -1362,6 +1370,7 @@ struct kvm_dirty_tlb {
> >>   #define KVM_REG_ARM64          0x6000000000000000ULL
> >>   #define KVM_REG_MIPS           0x7000000000000000ULL
> >>   #define KVM_REG_RISCV          0x8000000000000000ULL
> >> +#define KVM_REG_LOONGARCH      0x9000000000000000ULL
> >>
> >>   #define KVM_REG_SIZE_SHIFT     52
> >>   #define KVM_REG_SIZE_MASK      0x00f0000000000000ULL
> >> --
> >> 2.39.1
> >>
>
>
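
Since KVM_EXIT_LOONGARCH_IOCSR is a new exit reason in the shared uapi header,
a rough sketch of how a VMM run loop might consume it is shown below. This is
not taken from the series: handle_iocsr_read() and handle_iocsr_write() are
hypothetical VMM helpers, "run" is the mmap()ed vcpu kvm_run area, and error
handling is omitted.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical VMM helpers -- assumptions, not part of this patch set. */
void handle_iocsr_write(uint64_t addr, const uint8_t *data, uint32_t len);
void handle_iocsr_read(uint64_t addr, uint8_t *data, uint32_t len);

static void run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
	ioctl(vcpu_fd, KVM_RUN, 0);

	if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
		if (run->iocsr_io.is_write)
			handle_iocsr_write(run->iocsr_io.phys_addr,
					   run->iocsr_io.data,
					   run->iocsr_io.len);
		else
			/* The result goes into data[] before the next KVM_RUN. */
			handle_iocsr_read(run->iocsr_io.phys_addr,
					  run->iocsr_io.data,
					  run->iocsr_io.len);
	}
}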
  
zhaotianrui Sept. 18, 2023, 6:28 a.m. UTC | #5
On 2023/9/18 at 9:36 AM, Huacai Chen wrote:
> Hi, Tianrui,
>
> On Mon, Sep 18, 2023 at 9:32 AM zhaotianrui <zhaotianrui@loongson.cn> wrote:
>>
>> On 2023/9/16 at 4:48 PM, Huacai Chen wrote:
>>> Hi, Tianrui,
>>>
>>> On Fri, Sep 15, 2023 at 9:50 AM Tianrui Zhao <zhaotianrui@loongson.cn> wrote:
>>>> Add LoongArch KVM related header files, including kvm.h,
>>>> kvm_host.h, kvm_types.h. All of those are about LoongArch
>>>> virtualization features and kvm interfaces.
>>>>
>>>> Reviewed-by: Bibo Mao <maobibo@loongson.cn>
>>>> Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
>>>> ---
>>>>    arch/loongarch/include/asm/kvm_host.h  | 245 +++++++++++++++++++++++++
>>>>    arch/loongarch/include/asm/kvm_types.h |  11 ++
>>>>    arch/loongarch/include/uapi/asm/kvm.h  | 108 +++++++++++
>>>>    include/uapi/linux/kvm.h               |   9 +
>>>>    4 files changed, 373 insertions(+)
>>>>    create mode 100644 arch/loongarch/include/asm/kvm_host.h
>>>>    create mode 100644 arch/loongarch/include/asm/kvm_types.h
>>>>    create mode 100644 arch/loongarch/include/uapi/asm/kvm.h
>>>>
>>>> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
>>>> new file mode 100644
>>>> index 0000000000..00e0c1876b
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/include/asm/kvm_host.h
>>>> @@ -0,0 +1,245 @@
>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>> +/*
>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>> + */
>>>> +
>>>> +#ifndef __ASM_LOONGARCH_KVM_HOST_H__
>>>> +#define __ASM_LOONGARCH_KVM_HOST_H__
>>>> +
>>>> +#include <linux/cpumask.h>
>>>> +#include <linux/mutex.h>
>>>> +#include <linux/hrtimer.h>
>>>> +#include <linux/interrupt.h>
>>>> +#include <linux/types.h>
>>>> +#include <linux/kvm.h>
>>>> +#include <linux/kvm_types.h>
>>>> +#include <linux/threads.h>
>>>> +#include <linux/spinlock.h>
>>>> +
>>>> +#include <asm/inst.h>
>>>> +#include <asm/kvm_mmu.h>
>>>> +#include <asm/loongarch.h>
>>>> +
>>>> +/* Loongarch KVM register ids */
>>>> +#define KVM_GET_IOC_CSRIDX(id)         ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
>>>> +#define KVM_GET_IOC_CPUCFG_IDX(id)     ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
>>>> +
>>>> +#define KVM_MAX_VCPUS                  256
>>>> +#define KVM_MAX_CPUCFG_REGS            21
>>>> +/* memory slots that are not exposed to userspace */
>>>> +#define KVM_PRIVATE_MEM_SLOTS          0
>>>> +
>>>> +#define KVM_HALT_POLL_NS_DEFAULT       500000
>>>> +
>>>> +struct kvm_vm_stat {
>>>> +       struct kvm_vm_stat_generic generic;
>>>> +       u64 pages;
>>>> +       u64 hugepages;
>>>> +};
>>>> +
>>>> +struct kvm_vcpu_stat {
>>>> +       struct kvm_vcpu_stat_generic generic;
>>>> +       u64 idle_exits;
>>>> +       u64 signal_exits;
>>>> +       u64 int_exits;
>>>> +       u64 cpucfg_exits;
>>>> +};
>>>> +
>>>> +struct kvm_arch_memory_slot {
>>>> +};
>>>> +
>>>> +struct kvm_context {
>>>> +       unsigned long vpid_cache;
>>>> +       struct kvm_vcpu *last_vcpu;
>>>> +};
>>>> +
>>>> +struct kvm_world_switch {
>>>> +       int (*guest_eentry)(void);
>>>> +       int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
>>>> +       unsigned long page_order;
>>>> +};
>>>> +
>>>> +#define MAX_PGTABLE_LEVELS     4
>>>> +struct kvm_arch {
>>>> +       /* Guest physical mm */
>>>> +       kvm_pte_t *pgd;
>>>> +       unsigned long gpa_size;
>>>> +       unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
>>>> +       unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
>>>> +       unsigned int  root_level;
>>>> +
>>>> +       s64 time_offset;
>>>> +       struct kvm_context __percpu *vmcs;
>>>> +};
>>>> +
>>>> +#define CSR_MAX_NUMS           0x800
>>>> +
>>>> +struct loongarch_csrs {
>>>> +       unsigned long csrs[CSR_MAX_NUMS];
>>>> +};
>>>> +
>>>> +/* Resume Flags */
>>>> +#define RESUME_HOST            0
>>>> +#define RESUME_GUEST           1
>>>> +
>>>> +enum emulation_result {
>>>> +       EMULATE_DONE,           /* no further processing */
>>>> +       EMULATE_DO_MMIO,        /* kvm_run filled with MMIO request */
>>>> +       EMULATE_FAIL,           /* can't emulate this instruction */
>>>> +       EMULATE_EXCEPT,         /* A guest exception has been generated */
>>>> +       EMULATE_DO_IOCSR,       /* handle IOCSR request */
>>>> +};
>>>> +
>>>> +#define KVM_LARCH_FPU          (0x1 << 0)
>>>> +#define KVM_LARCH_CSR          (0x1 << 1)
>>>> +#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
>>>> +
>>>> +struct kvm_vcpu_arch {
>>>> +       /*
>>>> +        * Switch pointer-to-function type to unsigned long
>>>> +        * for loading the value into register directly.
>>>> +        */
>>>> +       unsigned long host_eentry;
>>>> +       unsigned long guest_eentry;
>>>> +
>>>> +       /* Pointers stored here for easy accessing from assembly code */
>>>> +       int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
>>>> +
>>>> +       /* Host registers preserved across guest mode execution */
>>>> +       unsigned long host_sp;
>>>> +       unsigned long host_tp;
>>>> +       unsigned long host_pgd;
>>>> +
>>>> +       /* Host CSRs are used when handling exits from guest */
>>>> +       unsigned long badi;
>>>> +       unsigned long badv;
>>>> +       unsigned long host_ecfg;
>>>> +       unsigned long host_estat;
>>>> +       unsigned long host_percpu;
>>>> +
>>>> +       /* GPRs */
>>>> +       unsigned long gprs[32];
>>>> +       unsigned long pc;
>>>> +
>>>> +       /* Which auxiliary state is loaded (KVM_LARCH_*) */
>>>> +       unsigned int aux_inuse;
>>>> +       /* FPU state */
>>>> +       struct loongarch_fpu fpu FPU_ALIGN;
>>>> +
>>>> +       /* CSR state */
>>>> +       struct loongarch_csrs *csr;
>>>> +
>>>> +       /* GPR used as IO source/target */
>>>> +       u32 io_gpr;
>>>> +
>>>> +       struct hrtimer swtimer;
>>>> +       /* KVM register to control count timer */
>>>> +       u32 count_ctl;
>>>> +
>>>> +       /* Bitmask of intr that are pending */
>>>> +       unsigned long irq_pending;
>>>> +       /* Bitmask of pending intr to be cleared */
>>>> +       unsigned long irq_clear;
>>>> +
>>>> +       /* Bitmask of exceptions that are pending */
>>>> +       unsigned long exception_pending;
>>>> +       unsigned int  subcode;
>>>> +
>>>> +       /* Cache for pages needed inside spinlock regions */
>>>> +       struct kvm_mmu_memory_cache mmu_page_cache;
>>>> +
>>>> +       /* vcpu's vpid */
>>>> +       u64 vpid;
>>>> +
>>>> +       /* Frequency of stable timer in Hz */
>>>> +       u64 timer_mhz;
>>>> +       ktime_t expire;
>>>> +
>>>> +       u64 core_ext_ioisr[4];
>>>> +
>>>> +       /* Last CPU the vCPU state was loaded on */
>>>> +       int last_sched_cpu;
>>>> +       /* mp state */
>>>> +       struct kvm_mp_state mp_state;
>>>> +       /* cpucfg */
>>>> +       u32 cpucfg[KVM_MAX_CPUCFG_REGS];
>>>> +};
>>>> +
>>>> +static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
>>>> +{
>>>> +       return csr->csrs[reg];
>>>> +}
>>>> +
>>>> +static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
>>>> +{
>>>> +       csr->csrs[reg] = val;
>>>> +}
>>>> +
>>>> +/* Helpers */
>>>> +static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
>>>> +{
>>>> +       return cpu_has_fpu;
>>>> +}
>>>> +
>>>> +void kvm_init_fault(void);
>>>> +
>>>> +/* Debug: dump vcpu state */
>>>> +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>>>> +
>>>> +/* MMU handling */
>>>> +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
>>>> +void kvm_flush_tlb_all(void);
>>>> +
>>>> +#define KVM_ARCH_WANT_MMU_NOTIFIER
>>>> +int kvm_unmap_hva_range(struct kvm *kvm,
>>>> +                       unsigned long start, unsigned long end, bool blockable);
>>>> +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
>>>> +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
>>>> +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
>>>> +
>>>> +static inline void update_pc(struct kvm_vcpu_arch *arch)
>>>> +{
>>>> +       arch->pc += 4;
>>>> +}
>>>> +
>>>> +/**
>>>> + * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
>>>> + * @arch:      Virtual CPU architecture state.
>>>> + *
>>>> + * Returns:    Whether the TLBL exception was likely due to an instruction
>>>> + *             fetch fault rather than a data load fault.
>>>> + */
>>>> +static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
>>>> +{
>>>> +       return arch->pc == arch->badv;
>>>> +}
>>>> +
>>>> +/* Misc */
>>>> +static inline void kvm_arch_hardware_unsetup(void) {}
>>>> +static inline void kvm_arch_sync_events(struct kvm *kvm) {}
>>>> +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
>>>> +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
>>>> +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
>>>> +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
>>>> +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
>>>> +static inline void kvm_arch_free_memslot(struct kvm *kvm,
>>>> +                                  struct kvm_memory_slot *slot) {}
>>>> +void kvm_check_vpid(struct kvm_vcpu *vcpu);
>>>> +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
>>>> +int kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
>>>> +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
>>>> +                                       const struct kvm_memory_slot *memslot);
>>>> +void kvm_init_vmcs(struct kvm *kvm);
>>>> +void kvm_vector_entry(void);
>>>> +int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
>>>> +extern const unsigned long kvm_vector_size;
>>>> +extern const unsigned long kvm_enter_guest_size;
>>>> +extern unsigned long vpid_mask;
>>>> +extern struct kvm_world_switch *kvm_loongarch_ops;
>>>> +
>>>> +#define SW_GCSR                (1 << 0)
>>>> +#define HW_GCSR                (1 << 1)
>>>> +#define INVALID_GCSR   (1 << 2)
>>>> +int get_gcsr_flag(int csr);
>>>> +extern void set_hw_gcsr(int csr_id, unsigned long val);
>>>> +#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */
>>>> diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h
>>>> new file mode 100644
>>>> index 0000000000..2fe1d4bdff
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/include/asm/kvm_types.h
>>>> @@ -0,0 +1,11 @@
>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>> +/*
>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>> + */
>>>> +
>>>> +#ifndef _ASM_LOONGARCH_KVM_TYPES_H
>>>> +#define _ASM_LOONGARCH_KVM_TYPES_H
>>>> +
>>>> +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE      40
>>>> +
>>>> +#endif /* _ASM_LOONGARCH_KVM_TYPES_H */
>>>> diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
>>>> new file mode 100644
>>>> index 0000000000..fafda487d6
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/include/uapi/asm/kvm.h
>>>> @@ -0,0 +1,108 @@
>>>> +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
>>>> +/*
>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>> + */
>>>> +
>>>> +#ifndef __UAPI_ASM_LOONGARCH_KVM_H
>>>> +#define __UAPI_ASM_LOONGARCH_KVM_H
>>>> +
>>>> +#include <linux/types.h>
>>>> +
>>>> +/*
>>>> + * KVM Loongarch specific structures and definitions.
>>>> + *
>>>> + * Some parts derived from the x86 version of this file.
>>>> + */
>>>> +
>>>> +#define __KVM_HAVE_READONLY_MEM
>>>> +
>>>> +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
>>>> +#define KVM_DIRTY_LOG_PAGE_OFFSET      64
>>>> +
>>>> +/*
>>>> + * for KVM_GET_REGS and KVM_SET_REGS
>>>> + */
>>>> +struct kvm_regs {
>>>> +       /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
>>>> +       __u64 gpr[32];
>>>> +       __u64 pc;
>>>> +};
>>>> +
>>>> +/*
>>>> + * for KVM_GET_FPU and KVM_SET_FPU
>>>> + */
>>>> +struct kvm_fpu {
>>>> +       __u32 fcsr;
>>>> +       __u64 fcc;    /* 8x8 */
>>>> +       struct kvm_fpureg {
>>>> +               __u64 val64[4];
>>>> +       } fpr[32];
>>>> +};
>>>> +
>>>> +/*
>>>> + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
>>>> + * registers.  The id field is broken down as follows:
>>>> + *
>>>> + *  bits[63..52] - As per linux/kvm.h
>>>> + *  bits[51..32] - Must be zero.
>>>> + *  bits[31..16] - Register set.
>>>> + *
>>>> + * Register set = 0: GP registers from kvm_regs (see definitions below).
>>>> + *
>>>> + * Register set = 1: CSR registers.
>>>> + *
>>>> + * Register set = 2: KVM specific registers (see definitions below).
>>>> + *
>>>> + * Register set = 3: FPU / SIMD registers (see definitions below).
>>>> + *
>>>> + * Other register sets may be added in the future.  Each set would
>>>> + * have its own identifier in bits[31..16].
>>>> + */
>>>> +
>>>> +#define KVM_REG_LOONGARCH_GPR          (KVM_REG_LOONGARCH | 0x00000ULL)
>>>> +#define KVM_REG_LOONGARCH_CSR          (KVM_REG_LOONGARCH | 0x10000ULL)
>>>> +#define KVM_REG_LOONGARCH_KVM          (KVM_REG_LOONGARCH | 0x20000ULL)
>>>> +#define KVM_REG_LOONGARCH_FPU          (KVM_REG_LOONGARCH | 0x30000ULL)
>>> How about rename to KVM_REG_LOONGARCH_FPSIMD?
>>>
>>> Huacai
>> It would break the UAPI used by user-space software and may cause
>> compatibility issues, so I think it is better to keep the original name.
> In your comment above it is not only FPU but FPU & SIMD, and this code
> hasn't been merged upstream yet, so how can it break the UAPI?
We want to apply this patch series to our other project once it is
upstream, so we would need to update the existing code there, and that
may break the UAPI. What do you think?

Thanks
Tianrui Zhao
>
> Huacai
>
>> Thanks
>> Tianrui Zhao
>>>> +#define KVM_REG_LOONGARCH_CPUCFG       (KVM_REG_LOONGARCH | 0x40000ULL)
>>>> +#define KVM_REG_LOONGARCH_MASK         (KVM_REG_LOONGARCH | 0x70000ULL)
>>>> +#define KVM_CSR_IDX_MASK               0x7fff
>>>> +#define KVM_CPUCFG_IDX_MASK            0x7fff
>>>> +
>>>> +/*
>>>> + * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
>>>> + */
>>>> +
>>>> +#define KVM_REG_LOONGARCH_COUNTER      (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
>>>> +#define KVM_REG_LOONGARCH_VCPU_RESET   (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)
>>>> +
>>>> +#define LOONGARCH_REG_SHIFT            3
>>>> +#define LOONGARCH_REG_64(TYPE, REG)    (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
>>>> +#define KVM_IOC_CSRID(REG)             LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
>>>> +#define KVM_IOC_CPUCFG(REG)            LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
>>>> +
>>>> +struct kvm_debug_exit_arch {
>>>> +};
>>>> +
>>>> +/* for KVM_SET_GUEST_DEBUG */
>>>> +struct kvm_guest_debug_arch {
>>>> +};
>>>> +
>>>> +/* definition of registers in kvm_run */
>>>> +struct kvm_sync_regs {
>>>> +};
>>>> +
>>>> +/* dummy definition */
>>>> +struct kvm_sregs {
>>>> +};
>>>> +
>>>> +struct kvm_iocsr_entry {
>>>> +       __u32 addr;
>>>> +       __u32 pad;
>>>> +       __u64 data;
>>>> +};
>>>> +
>>>> +#define KVM_NR_IRQCHIPS                1
>>>> +#define KVM_IRQCHIP_NUM_PINS   64
>>>> +#define KVM_MAX_CORES          256
>>>> +
>>>> +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
>>>> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
>>>> index 13065dd961..863f84619a 100644
>>>> --- a/include/uapi/linux/kvm.h
>>>> +++ b/include/uapi/linux/kvm.h
>>>> @@ -264,6 +264,7 @@ struct kvm_xen_exit {
>>>>    #define KVM_EXIT_RISCV_SBI        35
>>>>    #define KVM_EXIT_RISCV_CSR        36
>>>>    #define KVM_EXIT_NOTIFY           37
>>>> +#define KVM_EXIT_LOONGARCH_IOCSR  38
>>>>
>>>>    /* For KVM_EXIT_INTERNAL_ERROR */
>>>>    /* Emulate instruction failed. */
>>>> @@ -336,6 +337,13 @@ struct kvm_run {
>>>>                           __u32 len;
>>>>                           __u8  is_write;
>>>>                   } mmio;
>>>> +               /* KVM_EXIT_LOONGARCH_IOCSR */
>>>> +               struct {
>>>> +                       __u64 phys_addr;
>>>> +                       __u8  data[8];
>>>> +                       __u32 len;
>>>> +                       __u8  is_write;
>>>> +               } iocsr_io;
>>>>                   /* KVM_EXIT_HYPERCALL */
>>>>                   struct {
>>>>                           __u64 nr;
>>>> @@ -1362,6 +1370,7 @@ struct kvm_dirty_tlb {
>>>>    #define KVM_REG_ARM64          0x6000000000000000ULL
>>>>    #define KVM_REG_MIPS           0x7000000000000000ULL
>>>>    #define KVM_REG_RISCV          0x8000000000000000ULL
>>>> +#define KVM_REG_LOONGARCH      0x9000000000000000ULL
>>>>
>>>>    #define KVM_REG_SIZE_SHIFT     52
>>>>    #define KVM_REG_SIZE_MASK      0x00f0000000000000ULL
>>>> --
>>>> 2.39.1
>>>>
>>
  
zhaotianrui Sept. 18, 2023, 6:32 a.m. UTC | #6
On 2023/9/17 at 10:22 PM, Huacai Chen wrote:
> Hi, Tianrui,
>
> On Fri, Sep 15, 2023 at 9:50 AM Tianrui Zhao <zhaotianrui@loongson.cn> wrote:
>> Add LoongArch KVM related header files, including kvm.h,
>> kvm_host.h, kvm_types.h. All of those are about LoongArch
>> virtualization features and kvm interfaces.
>>
>> Reviewed-by: Bibo Mao <maobibo@loongson.cn>
>> Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
>> ---
>>   arch/loongarch/include/asm/kvm_host.h  | 245 +++++++++++++++++++++++++
>>   arch/loongarch/include/asm/kvm_types.h |  11 ++
>>   arch/loongarch/include/uapi/asm/kvm.h  | 108 +++++++++++
>>   include/uapi/linux/kvm.h               |   9 +
>>   4 files changed, 373 insertions(+)
>>   create mode 100644 arch/loongarch/include/asm/kvm_host.h
>>   create mode 100644 arch/loongarch/include/asm/kvm_types.h
>>   create mode 100644 arch/loongarch/include/uapi/asm/kvm.h
>>
>> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
>> new file mode 100644
>> index 0000000000..00e0c1876b
>> --- /dev/null
>> +++ b/arch/loongarch/include/asm/kvm_host.h
>> @@ -0,0 +1,245 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +/*
>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>> + */
>> +
>> +#ifndef __ASM_LOONGARCH_KVM_HOST_H__
>> +#define __ASM_LOONGARCH_KVM_HOST_H__
>> +
>> +#include <linux/cpumask.h>
>> +#include <linux/mutex.h>
>> +#include <linux/hrtimer.h>
>> +#include <linux/interrupt.h>
>> +#include <linux/types.h>
>> +#include <linux/kvm.h>
>> +#include <linux/kvm_types.h>
>> +#include <linux/threads.h>
>> +#include <linux/spinlock.h>
>> +
>> +#include <asm/inst.h>
>> +#include <asm/kvm_mmu.h>
>> +#include <asm/loongarch.h>
>> +
>> +/* Loongarch KVM register ids */
>> +#define KVM_GET_IOC_CSRIDX(id)         ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
>> +#define KVM_GET_IOC_CPUCFG_IDX(id)     ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
>> +
>> +#define KVM_MAX_VCPUS                  256
>> +#define KVM_MAX_CPUCFG_REGS            21
>> +/* memory slots that are not exposed to userspace */
>> +#define KVM_PRIVATE_MEM_SLOTS          0
>> +
>> +#define KVM_HALT_POLL_NS_DEFAULT       500000
>> +
>> +struct kvm_vm_stat {
>> +       struct kvm_vm_stat_generic generic;
>> +       u64 pages;
>> +       u64 hugepages;
>> +};
>> +
>> +struct kvm_vcpu_stat {
>> +       struct kvm_vcpu_stat_generic generic;
>> +       u64 idle_exits;
>> +       u64 signal_exits;
>> +       u64 int_exits;
>> +       u64 cpucfg_exits;
>> +};
>> +
>> +struct kvm_arch_memory_slot {
>> +};
>> +
>> +struct kvm_context {
>> +       unsigned long vpid_cache;
>> +       struct kvm_vcpu *last_vcpu;
>> +};
>> +
>> +struct kvm_world_switch {
>> +       int (*guest_eentry)(void);
>> +       int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
>> +       unsigned long page_order;
>> +};
>> +
>> +#define MAX_PGTABLE_LEVELS     4
>> +struct kvm_arch {
>> +       /* Guest physical mm */
>> +       kvm_pte_t *pgd;
>> +       unsigned long gpa_size;
>> +       unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
>> +       unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
>> +       unsigned int  root_level;
>> +
>> +       s64 time_offset;
>> +       struct kvm_context __percpu *vmcs;
>> +};
>> +
>> +#define CSR_MAX_NUMS           0x800
>> +
>> +struct loongarch_csrs {
>> +       unsigned long csrs[CSR_MAX_NUMS];
>> +};
>> +
>> +/* Resume Flags */
>> +#define RESUME_HOST            0
>> +#define RESUME_GUEST           1
>> +
>> +enum emulation_result {
>> +       EMULATE_DONE,           /* no further processing */
>> +       EMULATE_DO_MMIO,        /* kvm_run filled with MMIO request */
>> +       EMULATE_FAIL,           /* can't emulate this instruction */
>> +       EMULATE_EXCEPT,         /* A guest exception has been generated */
>> +       EMULATE_DO_IOCSR,       /* handle IOCSR request */
>> +};
>> +
>> +#define KVM_LARCH_FPU          (0x1 << 0)
>> +#define KVM_LARCH_CSR          (0x1 << 1)
>> +#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
>> +
>> +struct kvm_vcpu_arch {
>> +       /*
>> +        * Switch pointer-to-function type to unsigned long
>> +        * for loading the value into register directly.
>> +        */
>> +       unsigned long host_eentry;
>> +       unsigned long guest_eentry;
>> +
>> +       /* Pointers stored here for easy accessing from assembly code */
>> +       int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
>> +
>> +       /* Host registers preserved across guest mode execution */
>> +       unsigned long host_sp;
>> +       unsigned long host_tp;
>> +       unsigned long host_pgd;
>> +
>> +       /* Host CSRs are used when handling exits from guest */
>> +       unsigned long badi;
>> +       unsigned long badv;
>> +       unsigned long host_ecfg;
>> +       unsigned long host_estat;
>> +       unsigned long host_percpu;
>> +
>> +       /* GPRs */
>> +       unsigned long gprs[32];
>> +       unsigned long pc;
>> +
>> +       /* Which auxiliary state is loaded (KVM_LARCH_*) */
>> +       unsigned int aux_inuse;
>> +       /* FPU state */
>> +       struct loongarch_fpu fpu FPU_ALIGN;
>> +
>> +       /* CSR state */
>> +       struct loongarch_csrs *csr;
>> +
>> +       /* GPR used as IO source/target */
>> +       u32 io_gpr;
>> +
>> +       struct hrtimer swtimer;
>> +       /* KVM register to control count timer */
>> +       u32 count_ctl;
>> +
>> +       /* Bitmask of intr that are pending */
>> +       unsigned long irq_pending;
>> +       /* Bitmask of pending intr to be cleared */
>> +       unsigned long irq_clear;
>> +
>> +       /* Bitmask of exceptions that are pending */
>> +       unsigned long exception_pending;
>> +       unsigned int  subcode;
>> +
>> +       /* Cache for pages needed inside spinlock regions */
>> +       struct kvm_mmu_memory_cache mmu_page_cache;
>> +
>> +       /* vcpu's vpid */
>> +       u64 vpid;
>> +
>> +       /* Frequency of stable timer in Hz */
>> +       u64 timer_mhz;
>> +       ktime_t expire;
>> +
>> +       u64 core_ext_ioisr[4];
>> +
>> +       /* Last CPU the vCPU state was loaded on */
>> +       int last_sched_cpu;
>> +       /* mp state */
>> +       struct kvm_mp_state mp_state;
>> +       /* cpucfg */
>> +       u32 cpucfg[KVM_MAX_CPUCFG_REGS];
>> +};
>> +
>> +static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
>> +{
>> +       return csr->csrs[reg];
>> +}
>> +
>> +static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
>> +{
>> +       csr->csrs[reg] = val;
>> +}
>> +
>> +/* Helpers */
>> +static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
>> +{
>> +       return cpu_has_fpu;
>> +}
>> +
>> +void kvm_init_fault(void);
>> +
>> +/* Debug: dump vcpu state */
>> +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>> +
>> +/* MMU handling */
>> +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
>> +void kvm_flush_tlb_all(void);
>> +
>> +#define KVM_ARCH_WANT_MMU_NOTIFIER
>> +int kvm_unmap_hva_range(struct kvm *kvm,
>> +                       unsigned long start, unsigned long end, bool blockable);
>> +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
>> +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
>> +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
>> +
>> +static inline void update_pc(struct kvm_vcpu_arch *arch)
>> +{
>> +       arch->pc += 4;
>> +}
>> +
>> +/**
>> + * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
>> + * @arch:      Virtual CPU architecture state.
>> + *
>> + * Returns:    Whether the TLBL exception was likely due to an instruction
>> + *             fetch fault rather than a data load fault.
>> + */
>> +static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
>> +{
>> +       return arch->pc == arch->badv;
>> +}
>> +
>> +/* Misc */
>> +static inline void kvm_arch_hardware_unsetup(void) {}
>> +static inline void kvm_arch_sync_events(struct kvm *kvm) {}
>> +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
>> +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
>> +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
>> +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
>> +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
>> +static inline void kvm_arch_free_memslot(struct kvm *kvm,
>> +                                  struct kvm_memory_slot *slot) {}
>> +void kvm_check_vpid(struct kvm_vcpu *vcpu);
>> +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
>> +int kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
>> +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
>> +                                       const struct kvm_memory_slot *memslot);
>> +void kvm_init_vmcs(struct kvm *kvm);
>> +void kvm_vector_entry(void);
>> +int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
>> +extern const unsigned long kvm_vector_size;
>> +extern const unsigned long kvm_enter_guest_size;
>> +extern unsigned long vpid_mask;
>> +extern struct kvm_world_switch *kvm_loongarch_ops;
>> +
>> +#define SW_GCSR                (1 << 0)
>> +#define HW_GCSR                (1 << 1)
>> +#define INVALID_GCSR   (1 << 2)
>> +int get_gcsr_flag(int csr);
>> +extern void set_hw_gcsr(int csr_id, unsigned long val);
>> +#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */
>> diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h
>> new file mode 100644
>> index 0000000000..2fe1d4bdff
>> --- /dev/null
>> +++ b/arch/loongarch/include/asm/kvm_types.h
>> @@ -0,0 +1,11 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +/*
>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>> + */
>> +
>> +#ifndef _ASM_LOONGARCH_KVM_TYPES_H
>> +#define _ASM_LOONGARCH_KVM_TYPES_H
>> +
>> +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE      40
>> +
>> +#endif /* _ASM_LOONGARCH_KVM_TYPES_H */
>> diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
>> new file mode 100644
>> index 0000000000..fafda487d6
>> --- /dev/null
>> +++ b/arch/loongarch/include/uapi/asm/kvm.h
>> @@ -0,0 +1,108 @@
>> +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
>> +/*
>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>> + */
>> +
>> +#ifndef __UAPI_ASM_LOONGARCH_KVM_H
>> +#define __UAPI_ASM_LOONGARCH_KVM_H
>> +
>> +#include <linux/types.h>
>> +
>> +/*
>> + * KVM Loongarch specific structures and definitions.
>> + *
>> + * Some parts derived from the x86 version of this file.
>> + */
>> +
>> +#define __KVM_HAVE_READONLY_MEM
>> +
>> +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
>> +#define KVM_DIRTY_LOG_PAGE_OFFSET      64
>> +
>> +/*
>> + * for KVM_GET_REGS and KVM_SET_REGS
>> + */
>> +struct kvm_regs {
>> +       /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
>> +       __u64 gpr[32];
>> +       __u64 pc;
>> +};
>> +
>> +/*
>> + * for KVM_GET_FPU and KVM_SET_FPU
>> + */
>> +struct kvm_fpu {
>> +       __u32 fcsr;
>> +       __u64 fcc;    /* 8x8 */
>> +       struct kvm_fpureg {
>> +               __u64 val64[4];
>> +       } fpr[32];
>> +};
>> +
>> +/*
>> + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
>> + * registers.  The id field is broken down as follows:
>> + *
>> + *  bits[63..52] - As per linux/kvm.h
>> + *  bits[51..32] - Must be zero.
>> + *  bits[31..16] - Register set.
>> + *
>> + * Register set = 0: GP registers from kvm_regs (see definitions below).
>> + *
>> + * Register set = 1: CSR registers.
>> + *
>> + * Register set = 2: KVM specific registers (see definitions below).
>> + *
>> + * Register set = 3: FPU / SIMD registers (see definitions below).
>> + *
>> + * Other register sets may be added in the future.  Each set would
>> + * have its own identifier in bits[31..16].
>> + */
>> +
>> +#define KVM_REG_LOONGARCH_GPR          (KVM_REG_LOONGARCH | 0x00000ULL)
>> +#define KVM_REG_LOONGARCH_CSR          (KVM_REG_LOONGARCH | 0x10000ULL)
>> +#define KVM_REG_LOONGARCH_KVM          (KVM_REG_LOONGARCH | 0x20000ULL)
>> +#define KVM_REG_LOONGARCH_FPU          (KVM_REG_LOONGARCH | 0x30000ULL)
>> +#define KVM_REG_LOONGARCH_CPUCFG       (KVM_REG_LOONGARCH | 0x40000ULL)
>> +#define KVM_REG_LOONGARCH_MASK         (KVM_REG_LOONGARCH | 0x70000ULL)
>> +#define KVM_CSR_IDX_MASK               0x7fff
>> +#define KVM_CPUCFG_IDX_MASK            0x7fff
>> +
>> +/*
>> + * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
>> + */
>> +
>> +#define KVM_REG_LOONGARCH_COUNTER      (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
>> +#define KVM_REG_LOONGARCH_VCPU_RESET   (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)
> Why begin with 3? 0, 1, 2 reserved for what?
They are kept consistent with our original code, and 0, 1 and 2 are not
used for now.

Thanks
Tianrui Zhao
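
Whichever index the KVM-specific set starts at, userspace only ever sees the
composed constants. As an illustration (not from this series), the two
pseudo-registers defined above could be driven from a VMM like this, assuming
an already-created vcpu fd; treating the value written alongside VCPU_RESET as
"don't care" is an assumption rather than something the patch states.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: write a KVM-specific pseudo-register via the ONE_REG interface. */
static int set_kvm_pseudo_reg(int vcpu_fd, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id   = id,                        /* e.g. KVM_REG_LOONGARCH_VCPU_RESET */
		.addr = (uint64_t)(uintptr_t)&val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

/* Possible calls:
 *   set_kvm_pseudo_reg(vcpu_fd, KVM_REG_LOONGARCH_COUNTER, counter_value);
 *   set_kvm_pseudo_reg(vcpu_fd, KVM_REG_LOONGARCH_VCPU_RESET, 0);
 */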
>
> Huacai
>
>> +
>> +#define LOONGARCH_REG_SHIFT            3
>> +#define LOONGARCH_REG_64(TYPE, REG)    (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
>> +#define KVM_IOC_CSRID(REG)             LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
>> +#define KVM_IOC_CPUCFG(REG)            LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
>> +
>> +struct kvm_debug_exit_arch {
>> +};
>> +
>> +/* for KVM_SET_GUEST_DEBUG */
>> +struct kvm_guest_debug_arch {
>> +};
>> +
>> +/* definition of registers in kvm_run */
>> +struct kvm_sync_regs {
>> +};
>> +
>> +/* dummy definition */
>> +struct kvm_sregs {
>> +};
>> +
>> +struct kvm_iocsr_entry {
>> +       __u32 addr;
>> +       __u32 pad;
>> +       __u64 data;
>> +};
>> +
>> +#define KVM_NR_IRQCHIPS                1
>> +#define KVM_IRQCHIP_NUM_PINS   64
>> +#define KVM_MAX_CORES          256
>> +
>> +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
>> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
>> index 13065dd961..863f84619a 100644
>> --- a/include/uapi/linux/kvm.h
>> +++ b/include/uapi/linux/kvm.h
>> @@ -264,6 +264,7 @@ struct kvm_xen_exit {
>>   #define KVM_EXIT_RISCV_SBI        35
>>   #define KVM_EXIT_RISCV_CSR        36
>>   #define KVM_EXIT_NOTIFY           37
>> +#define KVM_EXIT_LOONGARCH_IOCSR  38
>>
>>   /* For KVM_EXIT_INTERNAL_ERROR */
>>   /* Emulate instruction failed. */
>> @@ -336,6 +337,13 @@ struct kvm_run {
>>                          __u32 len;
>>                          __u8  is_write;
>>                  } mmio;
>> +               /* KVM_EXIT_LOONGARCH_IOCSR */
>> +               struct {
>> +                       __u64 phys_addr;
>> +                       __u8  data[8];
>> +                       __u32 len;
>> +                       __u8  is_write;
>> +               } iocsr_io;
>>                  /* KVM_EXIT_HYPERCALL */
>>                  struct {
>>                          __u64 nr;
>> @@ -1362,6 +1370,7 @@ struct kvm_dirty_tlb {
>>   #define KVM_REG_ARM64          0x6000000000000000ULL
>>   #define KVM_REG_MIPS           0x7000000000000000ULL
>>   #define KVM_REG_RISCV          0x8000000000000000ULL
>> +#define KVM_REG_LOONGARCH      0x9000000000000000ULL
>>
>>   #define KVM_REG_SIZE_SHIFT     52
>>   #define KVM_REG_SIZE_MASK      0x00f0000000000000ULL
>> --
>> 2.39.1
>>
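
As a final illustration of the id scheme discussed in this sub-thread: the uapi
KVM_IOC_CSRID() encoder and the kernel-internal KVM_GET_IOC_CSRIDX() decoder
are inverses over the low bits, so a kernel-side sanity check could be written
as below. This is only a sketch, assuming both macros are visible in the same
translation unit (KVM_GET_IOC_CSRIDX() lives in kvm_host.h, KVM_IOC_CSRID() in
the uapi header); the index 0x4 is an arbitrary example.

/* Round-trip check using only macros introduced by this patch. */
_Static_assert(KVM_GET_IOC_CSRIDX(KVM_IOC_CSRID(0x4)) == 0x4,
	       "KVM_IOC_CSRID()/KVM_GET_IOC_CSRIDX() must round-trip");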
  
Huacai Chen Sept. 18, 2023, 6:35 a.m. UTC | #7
On Mon, Sep 18, 2023 at 2:28 PM zhaotianrui <zhaotianrui@loongson.cn> wrote:
>
>
> On 2023/9/18 at 9:36 AM, Huacai Chen wrote:
> > Hi, Tianrui,
> >
> > On Mon, Sep 18, 2023 at 9:32 AM zhaotianrui <zhaotianrui@loongson.cn> wrote:
> >>
> >> On 2023/9/16 at 4:48 PM, Huacai Chen wrote:
> >>> Hi, Tianrui,
> >>>
> >>> On Fri, Sep 15, 2023 at 9:50 AM Tianrui Zhao <zhaotianrui@loongson.cn> wrote:
> >>>> Add LoongArch KVM related header files, including kvm.h,
> >>>> kvm_host.h, kvm_types.h. All of those are about LoongArch
> >>>> virtualization features and kvm interfaces.
> >>>>
> >>>> Reviewed-by: Bibo Mao <maobibo@loongson.cn>
> >>>> Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
> >>>> ---
> >>>>    arch/loongarch/include/asm/kvm_host.h  | 245 +++++++++++++++++++++++++
> >>>>    arch/loongarch/include/asm/kvm_types.h |  11 ++
> >>>>    arch/loongarch/include/uapi/asm/kvm.h  | 108 +++++++++++
> >>>>    include/uapi/linux/kvm.h               |   9 +
> >>>>    4 files changed, 373 insertions(+)
> >>>>    create mode 100644 arch/loongarch/include/asm/kvm_host.h
> >>>>    create mode 100644 arch/loongarch/include/asm/kvm_types.h
> >>>>    create mode 100644 arch/loongarch/include/uapi/asm/kvm.h
> >>>>
> >>>> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
> >>>> new file mode 100644
> >>>> index 0000000000..00e0c1876b
> >>>> --- /dev/null
> >>>> +++ b/arch/loongarch/include/asm/kvm_host.h
> >>>> @@ -0,0 +1,245 @@
> >>>> +/* SPDX-License-Identifier: GPL-2.0 */
> >>>> +/*
> >>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> >>>> + */
> >>>> +
> >>>> +#ifndef __ASM_LOONGARCH_KVM_HOST_H__
> >>>> +#define __ASM_LOONGARCH_KVM_HOST_H__
> >>>> +
> >>>> +#include <linux/cpumask.h>
> >>>> +#include <linux/mutex.h>
> >>>> +#include <linux/hrtimer.h>
> >>>> +#include <linux/interrupt.h>
> >>>> +#include <linux/types.h>
> >>>> +#include <linux/kvm.h>
> >>>> +#include <linux/kvm_types.h>
> >>>> +#include <linux/threads.h>
> >>>> +#include <linux/spinlock.h>
> >>>> +
> >>>> +#include <asm/inst.h>
> >>>> +#include <asm/kvm_mmu.h>
> >>>> +#include <asm/loongarch.h>
> >>>> +
> >>>> +/* Loongarch KVM register ids */
> >>>> +#define KVM_GET_IOC_CSRIDX(id)         ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
> >>>> +#define KVM_GET_IOC_CPUCFG_IDX(id)     ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
> >>>> +
> >>>> +#define KVM_MAX_VCPUS                  256
> >>>> +#define KVM_MAX_CPUCFG_REGS            21
> >>>> +/* memory slots that are not exposed to userspace */
> >>>> +#define KVM_PRIVATE_MEM_SLOTS          0
> >>>> +
> >>>> +#define KVM_HALT_POLL_NS_DEFAULT       500000
> >>>> +
> >>>> +struct kvm_vm_stat {
> >>>> +       struct kvm_vm_stat_generic generic;
> >>>> +       u64 pages;
> >>>> +       u64 hugepages;
> >>>> +};
> >>>> +
> >>>> +struct kvm_vcpu_stat {
> >>>> +       struct kvm_vcpu_stat_generic generic;
> >>>> +       u64 idle_exits;
> >>>> +       u64 signal_exits;
> >>>> +       u64 int_exits;
> >>>> +       u64 cpucfg_exits;
> >>>> +};
> >>>> +
> >>>> +struct kvm_arch_memory_slot {
> >>>> +};
> >>>> +
> >>>> +struct kvm_context {
> >>>> +       unsigned long vpid_cache;
> >>>> +       struct kvm_vcpu *last_vcpu;
> >>>> +};
> >>>> +
> >>>> +struct kvm_world_switch {
> >>>> +       int (*guest_eentry)(void);
> >>>> +       int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
> >>>> +       unsigned long page_order;
> >>>> +};
> >>>> +
> >>>> +#define MAX_PGTABLE_LEVELS     4
> >>>> +struct kvm_arch {
> >>>> +       /* Guest physical mm */
> >>>> +       kvm_pte_t *pgd;
> >>>> +       unsigned long gpa_size;
> >>>> +       unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
> >>>> +       unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
> >>>> +       unsigned int  root_level;
> >>>> +
> >>>> +       s64 time_offset;
> >>>> +       struct kvm_context __percpu *vmcs;
> >>>> +};
> >>>> +
> >>>> +#define CSR_MAX_NUMS           0x800
> >>>> +
> >>>> +struct loongarch_csrs {
> >>>> +       unsigned long csrs[CSR_MAX_NUMS];
> >>>> +};
> >>>> +
> >>>> +/* Resume Flags */
> >>>> +#define RESUME_HOST            0
> >>>> +#define RESUME_GUEST           1
> >>>> +
> >>>> +enum emulation_result {
> >>>> +       EMULATE_DONE,           /* no further processing */
> >>>> +       EMULATE_DO_MMIO,        /* kvm_run filled with MMIO request */
> >>>> +       EMULATE_FAIL,           /* can't emulate this instruction */
> >>>> +       EMULATE_EXCEPT,         /* A guest exception has been generated */
> >>>> +       EMULATE_DO_IOCSR,       /* handle IOCSR request */
> >>>> +};
> >>>> +
> >>>> +#define KVM_LARCH_FPU          (0x1 << 0)
> >>>> +#define KVM_LARCH_CSR          (0x1 << 1)
> >>>> +#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
> >>>> +
> >>>> +struct kvm_vcpu_arch {
> >>>> +       /*
> >>>> +        * Switch pointer-to-function type to unsigned long
> >>>> +        * for loading the value into register directly.
> >>>> +        */
> >>>> +       unsigned long host_eentry;
> >>>> +       unsigned long guest_eentry;
> >>>> +
> >>>> +       /* Pointers stored here for easy accessing from assembly code */
> >>>> +       int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
> >>>> +
> >>>> +       /* Host registers preserved across guest mode execution */
> >>>> +       unsigned long host_sp;
> >>>> +       unsigned long host_tp;
> >>>> +       unsigned long host_pgd;
> >>>> +
> >>>> +       /* Host CSRs are used when handling exits from guest */
> >>>> +       unsigned long badi;
> >>>> +       unsigned long badv;
> >>>> +       unsigned long host_ecfg;
> >>>> +       unsigned long host_estat;
> >>>> +       unsigned long host_percpu;
> >>>> +
> >>>> +       /* GPRs */
> >>>> +       unsigned long gprs[32];
> >>>> +       unsigned long pc;
> >>>> +
> >>>> +       /* Which auxiliary state is loaded (KVM_LARCH_*) */
> >>>> +       unsigned int aux_inuse;
> >>>> +       /* FPU state */
> >>>> +       struct loongarch_fpu fpu FPU_ALIGN;
> >>>> +
> >>>> +       /* CSR state */
> >>>> +       struct loongarch_csrs *csr;
> >>>> +
> >>>> +       /* GPR used as IO source/target */
> >>>> +       u32 io_gpr;
> >>>> +
> >>>> +       struct hrtimer swtimer;
> >>>> +       /* KVM register to control count timer */
> >>>> +       u32 count_ctl;
> >>>> +
> >>>> +       /* Bitmask of intr that are pending */
> >>>> +       unsigned long irq_pending;
> >>>> +       /* Bitmask of pending intr to be cleared */
> >>>> +       unsigned long irq_clear;
> >>>> +
> >>>> +       /* Bitmask of exceptions that are pending */
> >>>> +       unsigned long exception_pending;
> >>>> +       unsigned int  subcode;
> >>>> +
> >>>> +       /* Cache for pages needed inside spinlock regions */
> >>>> +       struct kvm_mmu_memory_cache mmu_page_cache;
> >>>> +
> >>>> +       /* vcpu's vpid */
> >>>> +       u64 vpid;
> >>>> +
> >>>> +       /* Frequency of stable timer in Hz */
> >>>> +       u64 timer_mhz;
> >>>> +       ktime_t expire;
> >>>> +
> >>>> +       u64 core_ext_ioisr[4];
> >>>> +
> >>>> +       /* Last CPU the vCPU state was loaded on */
> >>>> +       int last_sched_cpu;
> >>>> +       /* mp state */
> >>>> +       struct kvm_mp_state mp_state;
> >>>> +       /* cpucfg */
> >>>> +       u32 cpucfg[KVM_MAX_CPUCFG_REGS];
> >>>> +};
> >>>> +
> >>>> +static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
> >>>> +{
> >>>> +       return csr->csrs[reg];
> >>>> +}
> >>>> +
> >>>> +static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
> >>>> +{
> >>>> +       csr->csrs[reg] = val;
> >>>> +}
> >>>> +
> >>>> +/* Helpers */
> >>>> +static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
> >>>> +{
> >>>> +       return cpu_has_fpu;
> >>>> +}
> >>>> +
> >>>> +void kvm_init_fault(void);
> >>>> +
> >>>> +/* Debug: dump vcpu state */
> >>>> +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
> >>>> +
> >>>> +/* MMU handling */
> >>>> +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
> >>>> +void kvm_flush_tlb_all(void);
> >>>> +
> >>>> +#define KVM_ARCH_WANT_MMU_NOTIFIER
> >>>> +int kvm_unmap_hva_range(struct kvm *kvm,
> >>>> +                       unsigned long start, unsigned long end, bool blockable);
> >>>> +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
> >>>> +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
> >>>> +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
> >>>> +
> >>>> +static inline void update_pc(struct kvm_vcpu_arch *arch)
> >>>> +{
> >>>> +       arch->pc += 4;
> >>>> +}
> >>>> +
> >>>> +/**
> >>>> + * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
> >>>> + * @arch:      Virtual CPU architecture state.
> >>>> + *
> >>>> + * Returns:    Whether the TLBL exception was likely due to an instruction
> >>>> + *             fetch fault rather than a data load fault.
> >>>> + */
> >>>> +static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
> >>>> +{
> >>>> +       return arch->pc == arch->badv;
> >>>> +}
> >>>> +
> >>>> +/* Misc */
> >>>> +static inline void kvm_arch_hardware_unsetup(void) {}
> >>>> +static inline void kvm_arch_sync_events(struct kvm *kvm) {}
> >>>> +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
> >>>> +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
> >>>> +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
> >>>> +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
> >>>> +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
> >>>> +static inline void kvm_arch_free_memslot(struct kvm *kvm,
> >>>> +                                  struct kvm_memory_slot *slot) {}
> >>>> +void kvm_check_vpid(struct kvm_vcpu *vcpu);
> >>>> +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
> >>>> +int kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
> >>>> +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
> >>>> +                                       const struct kvm_memory_slot *memslot);
> >>>> +void kvm_init_vmcs(struct kvm *kvm);
> >>>> +void kvm_vector_entry(void);
> >>>> +int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
> >>>> +extern const unsigned long kvm_vector_size;
> >>>> +extern const unsigned long kvm_enter_guest_size;
> >>>> +extern unsigned long vpid_mask;
> >>>> +extern struct kvm_world_switch *kvm_loongarch_ops;
> >>>> +
> >>>> +#define SW_GCSR                (1 << 0)
> >>>> +#define HW_GCSR                (1 << 1)
> >>>> +#define INVALID_GCSR   (1 << 2)
> >>>> +int get_gcsr_flag(int csr);
> >>>> +extern void set_hw_gcsr(int csr_id, unsigned long val);
> >>>> +#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */
> >>>> diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h
> >>>> new file mode 100644
> >>>> index 0000000000..2fe1d4bdff
> >>>> --- /dev/null
> >>>> +++ b/arch/loongarch/include/asm/kvm_types.h
> >>>> @@ -0,0 +1,11 @@
> >>>> +/* SPDX-License-Identifier: GPL-2.0 */
> >>>> +/*
> >>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> >>>> + */
> >>>> +
> >>>> +#ifndef _ASM_LOONGARCH_KVM_TYPES_H
> >>>> +#define _ASM_LOONGARCH_KVM_TYPES_H
> >>>> +
> >>>> +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE      40
> >>>> +
> >>>> +#endif /* _ASM_LOONGARCH_KVM_TYPES_H */
> >>>> diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
> >>>> new file mode 100644
> >>>> index 0000000000..fafda487d6
> >>>> --- /dev/null
> >>>> +++ b/arch/loongarch/include/uapi/asm/kvm.h
> >>>> @@ -0,0 +1,108 @@
> >>>> +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
> >>>> +/*
> >>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> >>>> + */
> >>>> +
> >>>> +#ifndef __UAPI_ASM_LOONGARCH_KVM_H
> >>>> +#define __UAPI_ASM_LOONGARCH_KVM_H
> >>>> +
> >>>> +#include <linux/types.h>
> >>>> +
> >>>> +/*
> >>>> + * KVM Loongarch specific structures and definitions.
> >>>> + *
> >>>> + * Some parts derived from the x86 version of this file.
> >>>> + */
> >>>> +
> >>>> +#define __KVM_HAVE_READONLY_MEM
> >>>> +
> >>>> +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
> >>>> +#define KVM_DIRTY_LOG_PAGE_OFFSET      64
> >>>> +
> >>>> +/*
> >>>> + * for KVM_GET_REGS and KVM_SET_REGS
> >>>> + */
> >>>> +struct kvm_regs {
> >>>> +       /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
> >>>> +       __u64 gpr[32];
> >>>> +       __u64 pc;
> >>>> +};
> >>>> +
> >>>> +/*
> >>>> + * for KVM_GET_FPU and KVM_SET_FPU
> >>>> + */
> >>>> +struct kvm_fpu {
> >>>> +       __u32 fcsr;
> >>>> +       __u64 fcc;    /* 8x8 */
> >>>> +       struct kvm_fpureg {
> >>>> +               __u64 val64[4];
> >>>> +       } fpr[32];
> >>>> +};
> >>>> +
> >>>> +/*
> >>>> + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
> >>>> + * registers.  The id field is broken down as follows:
> >>>> + *
> >>>> + *  bits[63..52] - As per linux/kvm.h
> >>>> + *  bits[51..32] - Must be zero.
> >>>> + *  bits[31..16] - Register set.
> >>>> + *
> >>>> + * Register set = 0: GP registers from kvm_regs (see definitions below).
> >>>> + *
> >>>> + * Register set = 1: CSR registers.
> >>>> + *
> >>>> + * Register set = 2: KVM specific registers (see definitions below).
> >>>> + *
> >>>> + * Register set = 3: FPU / SIMD registers (see definitions below).
> >>>> + *
> >>>> + * Other register sets may be added in the future.  Each set would
> >>>> + * have its own identifier in bits[31..16].
> >>>> + */
> >>>> +
> >>>> +#define KVM_REG_LOONGARCH_GPR          (KVM_REG_LOONGARCH | 0x00000ULL)
> >>>> +#define KVM_REG_LOONGARCH_CSR          (KVM_REG_LOONGARCH | 0x10000ULL)
> >>>> +#define KVM_REG_LOONGARCH_KVM          (KVM_REG_LOONGARCH | 0x20000ULL)
> >>>> +#define KVM_REG_LOONGARCH_FPU          (KVM_REG_LOONGARCH | 0x30000ULL)
> >>> How about rename to KVM_REG_LOONGARCH_FPSIMD?
> >>>
> >>> Huacai
> >> It would break the UAPI used by user-space software and may cause
> >> compatibility issues, so I think it is better to keep the original name.
> > In your comment above it is not only FPU but FPU & SIMD, and this code
> > hasn't been merged upstream yet, so how can it break the UAPI?
> We want to apply this patch series to our other project once it is
> upstream, so we would need to update the existing code there, and that
> may break the UAPI. What do you think?
Generally, the kernel is the first one to go upstream, so the kernel can
do anything reasonable; other projects should align with the kernel when
they want to go upstream.

Huacai

>
> Thanks
> Tianrui Zhao
> >
> > Huacai
> >
> >> Thanks
> >> Tianrui Zhao
> >>>> +#define KVM_REG_LOONGARCH_CPUCFG       (KVM_REG_LOONGARCH | 0x40000ULL)
> >>>> +#define KVM_REG_LOONGARCH_MASK         (KVM_REG_LOONGARCH | 0x70000ULL)
> >>>> +#define KVM_CSR_IDX_MASK               0x7fff
> >>>> +#define KVM_CPUCFG_IDX_MASK            0x7fff
> >>>> +
> >>>> +/*
> >>>> + * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
> >>>> + */
> >>>> +
> >>>> +#define KVM_REG_LOONGARCH_COUNTER      (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
> >>>> +#define KVM_REG_LOONGARCH_VCPU_RESET   (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)
> >>>> +
> >>>> +#define LOONGARCH_REG_SHIFT            3
> >>>> +#define LOONGARCH_REG_64(TYPE, REG)    (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
> >>>> +#define KVM_IOC_CSRID(REG)             LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
> >>>> +#define KVM_IOC_CPUCFG(REG)            LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
> >>>> +
> >>>> +struct kvm_debug_exit_arch {
> >>>> +};
> >>>> +
> >>>> +/* for KVM_SET_GUEST_DEBUG */
> >>>> +struct kvm_guest_debug_arch {
> >>>> +};
> >>>> +
> >>>> +/* definition of registers in kvm_run */
> >>>> +struct kvm_sync_regs {
> >>>> +};
> >>>> +
> >>>> +/* dummy definition */
> >>>> +struct kvm_sregs {
> >>>> +};
> >>>> +
> >>>> +struct kvm_iocsr_entry {
> >>>> +       __u32 addr;
> >>>> +       __u32 pad;
> >>>> +       __u64 data;
> >>>> +};
> >>>> +
> >>>> +#define KVM_NR_IRQCHIPS                1
> >>>> +#define KVM_IRQCHIP_NUM_PINS   64
> >>>> +#define KVM_MAX_CORES          256
> >>>> +
> >>>> +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
> >>>> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
> >>>> index 13065dd961..863f84619a 100644
> >>>> --- a/include/uapi/linux/kvm.h
> >>>> +++ b/include/uapi/linux/kvm.h
> >>>> @@ -264,6 +264,7 @@ struct kvm_xen_exit {
> >>>>    #define KVM_EXIT_RISCV_SBI        35
> >>>>    #define KVM_EXIT_RISCV_CSR        36
> >>>>    #define KVM_EXIT_NOTIFY           37
> >>>> +#define KVM_EXIT_LOONGARCH_IOCSR  38
> >>>>
> >>>>    /* For KVM_EXIT_INTERNAL_ERROR */
> >>>>    /* Emulate instruction failed. */
> >>>> @@ -336,6 +337,13 @@ struct kvm_run {
> >>>>                           __u32 len;
> >>>>                           __u8  is_write;
> >>>>                   } mmio;
> >>>> +               /* KVM_EXIT_LOONGARCH_IOCSR */
> >>>> +               struct {
> >>>> +                       __u64 phys_addr;
> >>>> +                       __u8  data[8];
> >>>> +                       __u32 len;
> >>>> +                       __u8  is_write;
> >>>> +               } iocsr_io;
> >>>>                   /* KVM_EXIT_HYPERCALL */
> >>>>                   struct {
> >>>>                           __u64 nr;
> >>>> @@ -1362,6 +1370,7 @@ struct kvm_dirty_tlb {
> >>>>    #define KVM_REG_ARM64          0x6000000000000000ULL
> >>>>    #define KVM_REG_MIPS           0x7000000000000000ULL
> >>>>    #define KVM_REG_RISCV          0x8000000000000000ULL
> >>>> +#define KVM_REG_LOONGARCH      0x9000000000000000ULL
> >>>>
> >>>>    #define KVM_REG_SIZE_SHIFT     52
> >>>>    #define KVM_REG_SIZE_MASK      0x00f0000000000000ULL
> >>>> --
> >>>> 2.39.1
> >>>>
> >>
>
  
Huacai Chen Sept. 18, 2023, 6:37 a.m. UTC | #8
On Mon, Sep 18, 2023 at 2:32 PM zhaotianrui <zhaotianrui@loongson.cn> wrote:
>
>
> On 2023/9/17 at 10:22 PM, Huacai Chen wrote:
> > Hi, Tianrui,
> >
> > On Fri, Sep 15, 2023 at 9:50 AM Tianrui Zhao <zhaotianrui@loongson.cn> wrote:
> >> Add LoongArch KVM related header files, including kvm.h,
> >> kvm_host.h, kvm_types.h. All of those are about LoongArch
> >> virtualization features and kvm interfaces.
> >>
> >> Reviewed-by: Bibo Mao <maobibo@loongson.cn>
> >> Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
> >> ---
> >>   arch/loongarch/include/asm/kvm_host.h  | 245 +++++++++++++++++++++++++
> >>   arch/loongarch/include/asm/kvm_types.h |  11 ++
> >>   arch/loongarch/include/uapi/asm/kvm.h  | 108 +++++++++++
> >>   include/uapi/linux/kvm.h               |   9 +
> >>   4 files changed, 373 insertions(+)
> >>   create mode 100644 arch/loongarch/include/asm/kvm_host.h
> >>   create mode 100644 arch/loongarch/include/asm/kvm_types.h
> >>   create mode 100644 arch/loongarch/include/uapi/asm/kvm.h
> >>
> >> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
> >> new file mode 100644
> >> index 0000000000..00e0c1876b
> >> --- /dev/null
> >> +++ b/arch/loongarch/include/asm/kvm_host.h
> >> @@ -0,0 +1,245 @@
> >> +/* SPDX-License-Identifier: GPL-2.0 */
> >> +/*
> >> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> >> + */
> >> +
> >> +#ifndef __ASM_LOONGARCH_KVM_HOST_H__
> >> +#define __ASM_LOONGARCH_KVM_HOST_H__
> >> +
> >> +#include <linux/cpumask.h>
> >> +#include <linux/mutex.h>
> >> +#include <linux/hrtimer.h>
> >> +#include <linux/interrupt.h>
> >> +#include <linux/types.h>
> >> +#include <linux/kvm.h>
> >> +#include <linux/kvm_types.h>
> >> +#include <linux/threads.h>
> >> +#include <linux/spinlock.h>
> >> +
> >> +#include <asm/inst.h>
> >> +#include <asm/kvm_mmu.h>
> >> +#include <asm/loongarch.h>
> >> +
> >> +/* Loongarch KVM register ids */
> >> +#define KVM_GET_IOC_CSRIDX(id)         ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
> >> +#define KVM_GET_IOC_CPUCFG_IDX(id)     ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
> >> +
> >> +#define KVM_MAX_VCPUS                  256
> >> +#define KVM_MAX_CPUCFG_REGS            21
> >> +/* memory slots that does not exposed to userspace */
> >> +#define KVM_PRIVATE_MEM_SLOTS          0
> >> +
> >> +#define KVM_HALT_POLL_NS_DEFAULT       500000
> >> +
> >> +struct kvm_vm_stat {
> >> +       struct kvm_vm_stat_generic generic;
> >> +       u64 pages;
> >> +       u64 hugepages;
> >> +};
> >> +
> >> +struct kvm_vcpu_stat {
> >> +       struct kvm_vcpu_stat_generic generic;
> >> +       u64 idle_exits;
> >> +       u64 signal_exits;
> >> +       u64 int_exits;
> >> +       u64 cpucfg_exits;
> >> +};
> >> +
> >> +struct kvm_arch_memory_slot {
> >> +};
> >> +
> >> +struct kvm_context {
> >> +       unsigned long vpid_cache;
> >> +       struct kvm_vcpu *last_vcpu;
> >> +};
> >> +
> >> +struct kvm_world_switch {
> >> +       int (*guest_eentry)(void);
> >> +       int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
> >> +       unsigned long page_order;
> >> +};
> >> +
> >> +#define MAX_PGTABLE_LEVELS     4
> >> +struct kvm_arch {
> >> +       /* Guest physical mm */
> >> +       kvm_pte_t *pgd;
> >> +       unsigned long gpa_size;
> >> +       unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
> >> +       unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
> >> +       unsigned int  root_level;
> >> +
> >> +       s64 time_offset;
> >> +       struct kvm_context __percpu *vmcs;
> >> +};
> >> +
> >> +#define CSR_MAX_NUMS           0x800
> >> +
> >> +struct loongarch_csrs {
> >> +       unsigned long csrs[CSR_MAX_NUMS];
> >> +};
> >> +
> >> +/* Resume Flags */
> >> +#define RESUME_HOST            0
> >> +#define RESUME_GUEST           1
> >> +
> >> +enum emulation_result {
> >> +       EMULATE_DONE,           /* no further processing */
> >> +       EMULATE_DO_MMIO,        /* kvm_run filled with MMIO request */
> >> +       EMULATE_FAIL,           /* can't emulate this instruction */
> >> +       EMULATE_EXCEPT,         /* A guest exception has been generated */
> >> +       EMULATE_DO_IOCSR,       /* handle IOCSR request */
> >> +};
> >> +
> >> +#define KVM_LARCH_FPU          (0x1 << 0)
> >> +#define KVM_LARCH_CSR          (0x1 << 1)
> >> +#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
> >> +
> >> +struct kvm_vcpu_arch {
> >> +       /*
> >> +        * Switch pointer-to-function type to unsigned long
> >> +        * for loading the value into register directly.
> >> +        */
> >> +       unsigned long host_eentry;
> >> +       unsigned long guest_eentry;
> >> +
> >> +       /* Pointers stored here for easy accessing from assembly code */
> >> +       int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
> >> +
> >> +       /* Host registers preserved across guest mode execution */
> >> +       unsigned long host_sp;
> >> +       unsigned long host_tp;
> >> +       unsigned long host_pgd;
> >> +
> >> +       /* Host CSRs are used when handling exits from guest */
> >> +       unsigned long badi;
> >> +       unsigned long badv;
> >> +       unsigned long host_ecfg;
> >> +       unsigned long host_estat;
> >> +       unsigned long host_percpu;
> >> +
> >> +       /* GPRs */
> >> +       unsigned long gprs[32];
> >> +       unsigned long pc;
> >> +
> >> +       /* Which auxiliary state is loaded (KVM_LARCH_*) */
> >> +       unsigned int aux_inuse;
> >> +       /* FPU state */
> >> +       struct loongarch_fpu fpu FPU_ALIGN;
> >> +
> >> +       /* CSR state */
> >> +       struct loongarch_csrs *csr;
> >> +
> >> +       /* GPR used as IO source/target */
> >> +       u32 io_gpr;
> >> +
> >> +       struct hrtimer swtimer;
> >> +       /* KVM register to control count timer */
> >> +       u32 count_ctl;
> >> +
> >> +       /* Bitmask of intr that are pending */
> >> +       unsigned long irq_pending;
> >> +       /* Bitmask of pending intr to be cleared */
> >> +       unsigned long irq_clear;
> >> +
> >> +       /* Bitmask of exceptions that are pending */
> >> +       unsigned long exception_pending;
> >> +       unsigned int  subcode;
> >> +
> >> +       /* Cache for pages needed inside spinlock regions */
> >> +       struct kvm_mmu_memory_cache mmu_page_cache;
> >> +
> >> +       /* vcpu's vpid */
> >> +       u64 vpid;
> >> +
> >> +       /* Frequency of stable timer in Hz */
> >> +       u64 timer_mhz;
> >> +       ktime_t expire;
> >> +
> >> +       u64 core_ext_ioisr[4];
> >> +
> >> +       /* Last CPU the vCPU state was loaded on */
> >> +       int last_sched_cpu;
> >> +       /* mp state */
> >> +       struct kvm_mp_state mp_state;
> >> +       /* cpucfg */
> >> +       u32 cpucfg[KVM_MAX_CPUCFG_REGS];
> >> +};
> >> +
> >> +static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
> >> +{
> >> +       return csr->csrs[reg];
> >> +}
> >> +
> >> +static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
> >> +{
> >> +       csr->csrs[reg] = val;
> >> +}
> >> +
> >> +/* Helpers */
> >> +static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
> >> +{
> >> +       return cpu_has_fpu;
> >> +}
> >> +
> >> +void kvm_init_fault(void);
> >> +
> >> +/* Debug: dump vcpu state */
> >> +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
> >> +
> >> +/* MMU handling */
> >> +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
> >> +void kvm_flush_tlb_all(void);
> >> +
> >> +#define KVM_ARCH_WANT_MMU_NOTIFIER
> >> +int kvm_unmap_hva_range(struct kvm *kvm,
> >> +                       unsigned long start, unsigned long end, bool blockable);
> >> +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
> >> +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
> >> +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
> >> +
> >> +static inline void update_pc(struct kvm_vcpu_arch *arch)
> >> +{
> >> +       arch->pc += 4;
> >> +}
> >> +
> >> +/**
> >> + * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
> >> + * @vcpu:      Virtual CPU.
> >> + *
> >> + * Returns:    Whether the TLBL exception was likely due to an instruction
> >> + *             fetch fault rather than a data load fault.
> >> + */
> >> +static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
> >> +{
> >> +       return arch->pc == arch->badv;
> >> +}
> >> +
> >> +/* Misc */
> >> +static inline void kvm_arch_hardware_unsetup(void) {}
> >> +static inline void kvm_arch_sync_events(struct kvm *kvm) {}
> >> +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
> >> +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
> >> +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
> >> +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
> >> +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
> >> +static inline void kvm_arch_free_memslot(struct kvm *kvm,
> >> +                                  struct kvm_memory_slot *slot) {}
> >> +void kvm_check_vpid(struct kvm_vcpu *vcpu);
> >> +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
> >> +int kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
> >> +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
> >> +                                       const struct kvm_memory_slot *memslot);
> >> +void kvm_init_vmcs(struct kvm *kvm);
> >> +void kvm_vector_entry(void);
> >> +int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
> >> +extern const unsigned long kvm_vector_size;
> >> +extern const unsigned long kvm_enter_guest_size;
> >> +extern unsigned long vpid_mask;
> >> +extern struct kvm_world_switch *kvm_loongarch_ops;
> >> +
> >> +#define SW_GCSR                (1 << 0)
> >> +#define HW_GCSR                (1 << 1)
> >> +#define INVALID_GCSR   (1 << 2)
> >> +int get_gcsr_flag(int csr);
> >> +extern void set_hw_gcsr(int csr_id, unsigned long val);
> >> +#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */
> >> diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h
> >> new file mode 100644
> >> index 0000000000..2fe1d4bdff
> >> --- /dev/null
> >> +++ b/arch/loongarch/include/asm/kvm_types.h
> >> @@ -0,0 +1,11 @@
> >> +/* SPDX-License-Identifier: GPL-2.0 */
> >> +/*
> >> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> >> + */
> >> +
> >> +#ifndef _ASM_LOONGARCH_KVM_TYPES_H
> >> +#define _ASM_LOONGARCH_KVM_TYPES_H
> >> +
> >> +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE      40
> >> +
> >> +#endif /* _ASM_LOONGARCH_KVM_TYPES_H */
> >> diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
> >> new file mode 100644
> >> index 0000000000..fafda487d6
> >> --- /dev/null
> >> +++ b/arch/loongarch/include/uapi/asm/kvm.h
> >> @@ -0,0 +1,108 @@
> >> +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
> >> +/*
> >> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> >> + */
> >> +
> >> +#ifndef __UAPI_ASM_LOONGARCH_KVM_H
> >> +#define __UAPI_ASM_LOONGARCH_KVM_H
> >> +
> >> +#include <linux/types.h>
> >> +
> >> +/*
> >> + * KVM Loongarch specific structures and definitions.
> >> + *
> >> + * Some parts derived from the x86 version of this file.
> >> + */
> >> +
> >> +#define __KVM_HAVE_READONLY_MEM
> >> +
> >> +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
> >> +#define KVM_DIRTY_LOG_PAGE_OFFSET      64
> >> +
> >> +/*
> >> + * for KVM_GET_REGS and KVM_SET_REGS
> >> + */
> >> +struct kvm_regs {
> >> +       /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
> >> +       __u64 gpr[32];
> >> +       __u64 pc;
> >> +};
> >> +
> >> +/*
> >> + * for KVM_GET_FPU and KVM_SET_FPU
> >> + */
> >> +struct kvm_fpu {
> >> +       __u32 fcsr;
> >> +       __u64 fcc;    /* 8x8 */
> >> +       struct kvm_fpureg {
> >> +               __u64 val64[4];
> >> +       } fpr[32];
> >> +};
> >> +
> >> +/*
> >> + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
> >> + * registers.  The id field is broken down as follows:
> >> + *
> >> + *  bits[63..52] - As per linux/kvm.h
> >> + *  bits[51..32] - Must be zero.
> >> + *  bits[31..16] - Register set.
> >> + *
> >> + * Register set = 0: GP registers from kvm_regs (see definitions below).
> >> + *
> >> + * Register set = 1: CSR registers.
> >> + *
> >> + * Register set = 2: KVM specific registers (see definitions below).
> >> + *
> >> + * Register set = 3: FPU / SIMD registers (see definitions below).
> >> + *
> >> + * Other sets registers may be added in the future.  Each set would
> >> + * have its own identifier in bits[31..16].
> >> + */
> >> +
> >> +#define KVM_REG_LOONGARCH_GPR          (KVM_REG_LOONGARCH | 0x00000ULL)
> >> +#define KVM_REG_LOONGARCH_CSR          (KVM_REG_LOONGARCH | 0x10000ULL)
> >> +#define KVM_REG_LOONGARCH_KVM          (KVM_REG_LOONGARCH | 0x20000ULL)
> >> +#define KVM_REG_LOONGARCH_FPU          (KVM_REG_LOONGARCH | 0x30000ULL)
> >> +#define KVM_REG_LOONGARCH_CPUCFG       (KVM_REG_LOONGARCH | 0x40000ULL)
> >> +#define KVM_REG_LOONGARCH_MASK         (KVM_REG_LOONGARCH | 0x70000ULL)
> >> +#define KVM_CSR_IDX_MASK               0x7fff
> >> +#define KVM_CPUCFG_IDX_MASK            0x7fff
> >> +
> >> +/*
> >> + * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
> >> + */
> >> +
> >> +#define KVM_REG_LOONGARCH_COUNTER      (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
> >> +#define KVM_REG_LOONGARCH_VCPU_RESET   (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)
> > Why begin with 3? 0, 1, 2 reserved for what?
> They are kept consistent with our original code, and 0, 1, 2 are not used
> now.
The same as with KVM_REG_LOONGARCH_FPU: the kernel is the first to go
upstream, so I suggest beginning with 0 or 1, not with 3.
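
Something like the following, for example (only a sketch of how the
renumbering could look; the exact indexes 1 and 2 are illustrative, and the
KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 encoding is kept as in the patch):

	#define KVM_REG_LOONGARCH_COUNTER	(KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1)
	#define KVM_REG_LOONGARCH_VCPU_RESET	(KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2)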

Huacai

>
> Thanks
> Tianrui Zhao
> >
> > Huacai
> >
> >> +
> >> +#define LOONGARCH_REG_SHIFT            3
> >> +#define LOONGARCH_REG_64(TYPE, REG)    (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
> >> +#define KVM_IOC_CSRID(REG)             LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
> >> +#define KVM_IOC_CPUCFG(REG)            LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
> >> +
> >> +struct kvm_debug_exit_arch {
> >> +};
> >> +
> >> +/* for KVM_SET_GUEST_DEBUG */
> >> +struct kvm_guest_debug_arch {
> >> +};
> >> +
> >> +/* definition of registers in kvm_run */
> >> +struct kvm_sync_regs {
> >> +};
> >> +
> >> +/* dummy definition */
> >> +struct kvm_sregs {
> >> +};
> >> +
> >> +struct kvm_iocsr_entry {
> >> +       __u32 addr;
> >> +       __u32 pad;
> >> +       __u64 data;
> >> +};
> >> +
> >> +#define KVM_NR_IRQCHIPS                1
> >> +#define KVM_IRQCHIP_NUM_PINS   64
> >> +#define KVM_MAX_CORES          256
> >> +
> >> +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
> >> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
> >> index 13065dd961..863f84619a 100644
> >> --- a/include/uapi/linux/kvm.h
> >> +++ b/include/uapi/linux/kvm.h
> >> @@ -264,6 +264,7 @@ struct kvm_xen_exit {
> >>   #define KVM_EXIT_RISCV_SBI        35
> >>   #define KVM_EXIT_RISCV_CSR        36
> >>   #define KVM_EXIT_NOTIFY           37
> >> +#define KVM_EXIT_LOONGARCH_IOCSR  38
> >>
> >>   /* For KVM_EXIT_INTERNAL_ERROR */
> >>   /* Emulate instruction failed. */
> >> @@ -336,6 +337,13 @@ struct kvm_run {
> >>                          __u32 len;
> >>                          __u8  is_write;
> >>                  } mmio;
> >> +               /* KVM_EXIT_LOONGARCH_IOCSR */
> >> +               struct {
> >> +                       __u64 phys_addr;
> >> +                       __u8  data[8];
> >> +                       __u32 len;
> >> +                       __u8  is_write;
> >> +               } iocsr_io;
> >>                  /* KVM_EXIT_HYPERCALL */
> >>                  struct {
> >>                          __u64 nr;
> >> @@ -1362,6 +1370,7 @@ struct kvm_dirty_tlb {
> >>   #define KVM_REG_ARM64          0x6000000000000000ULL
> >>   #define KVM_REG_MIPS           0x7000000000000000ULL
> >>   #define KVM_REG_RISCV          0x8000000000000000ULL
> >> +#define KVM_REG_LOONGARCH      0x9000000000000000ULL
> >>
> >>   #define KVM_REG_SIZE_SHIFT     52
> >>   #define KVM_REG_SIZE_MASK      0x00f0000000000000ULL
> >> --
> >> 2.39.1
> >>
>
  
zhaotianrui Sept. 19, 2023, 2:56 a.m. UTC | #9
On 2023/9/18 at 2:35 PM, Huacai Chen wrote:
> On Mon, Sep 18, 2023 at 2:28 PM zhaotianrui <zhaotianrui@loongson.cn> wrote:
>>
>> On 2023/9/18 at 9:36 AM, Huacai Chen wrote:
>>> Hi, Tianrui,
>>>
>>> On Mon, Sep 18, 2023 at 9:32 AM zhaotianrui <zhaotianrui@loongson.cn> wrote:
>>>> On 2023/9/16 at 4:48 PM, Huacai Chen wrote:
>>>>> Hi, Tianrui,
>>>>>
>>>>> On Fri, Sep 15, 2023 at 9:50 AM Tianrui Zhao <zhaotianrui@loongson.cn> wrote:
>>>>>> Add LoongArch KVM related header files, including kvm.h,
>>>>>> kvm_host.h, kvm_types.h. All of those are about LoongArch
>>>>>> virtualization features and kvm interfaces.
>>>>>>
>>>>>> Reviewed-by: Bibo Mao <maobibo@loongson.cn>
>>>>>> Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
>>>>>> ---
>>>>>>     arch/loongarch/include/asm/kvm_host.h  | 245 +++++++++++++++++++++++++
>>>>>>     arch/loongarch/include/asm/kvm_types.h |  11 ++
>>>>>>     arch/loongarch/include/uapi/asm/kvm.h  | 108 +++++++++++
>>>>>>     include/uapi/linux/kvm.h               |   9 +
>>>>>>     4 files changed, 373 insertions(+)
>>>>>>     create mode 100644 arch/loongarch/include/asm/kvm_host.h
>>>>>>     create mode 100644 arch/loongarch/include/asm/kvm_types.h
>>>>>>     create mode 100644 arch/loongarch/include/uapi/asm/kvm.h
>>>>>>
>>>>>> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
>>>>>> new file mode 100644
>>>>>> index 0000000000..00e0c1876b
>>>>>> --- /dev/null
>>>>>> +++ b/arch/loongarch/include/asm/kvm_host.h
>>>>>> @@ -0,0 +1,245 @@
>>>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>>>> +/*
>>>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>>>> + */
>>>>>> +
>>>>>> +#ifndef __ASM_LOONGARCH_KVM_HOST_H__
>>>>>> +#define __ASM_LOONGARCH_KVM_HOST_H__
>>>>>> +
>>>>>> +#include <linux/cpumask.h>
>>>>>> +#include <linux/mutex.h>
>>>>>> +#include <linux/hrtimer.h>
>>>>>> +#include <linux/interrupt.h>
>>>>>> +#include <linux/types.h>
>>>>>> +#include <linux/kvm.h>
>>>>>> +#include <linux/kvm_types.h>
>>>>>> +#include <linux/threads.h>
>>>>>> +#include <linux/spinlock.h>
>>>>>> +
>>>>>> +#include <asm/inst.h>
>>>>>> +#include <asm/kvm_mmu.h>
>>>>>> +#include <asm/loongarch.h>
>>>>>> +
>>>>>> +/* Loongarch KVM register ids */
>>>>>> +#define KVM_GET_IOC_CSRIDX(id)         ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
>>>>>> +#define KVM_GET_IOC_CPUCFG_IDX(id)     ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
>>>>>> +
>>>>>> +#define KVM_MAX_VCPUS                  256
>>>>>> +#define KVM_MAX_CPUCFG_REGS            21
>>>>>> +/* memory slots that does not exposed to userspace */
>>>>>> +#define KVM_PRIVATE_MEM_SLOTS          0
>>>>>> +
>>>>>> +#define KVM_HALT_POLL_NS_DEFAULT       500000
>>>>>> +
>>>>>> +struct kvm_vm_stat {
>>>>>> +       struct kvm_vm_stat_generic generic;
>>>>>> +       u64 pages;
>>>>>> +       u64 hugepages;
>>>>>> +};
>>>>>> +
>>>>>> +struct kvm_vcpu_stat {
>>>>>> +       struct kvm_vcpu_stat_generic generic;
>>>>>> +       u64 idle_exits;
>>>>>> +       u64 signal_exits;
>>>>>> +       u64 int_exits;
>>>>>> +       u64 cpucfg_exits;
>>>>>> +};
>>>>>> +
>>>>>> +struct kvm_arch_memory_slot {
>>>>>> +};
>>>>>> +
>>>>>> +struct kvm_context {
>>>>>> +       unsigned long vpid_cache;
>>>>>> +       struct kvm_vcpu *last_vcpu;
>>>>>> +};
>>>>>> +
>>>>>> +struct kvm_world_switch {
>>>>>> +       int (*guest_eentry)(void);
>>>>>> +       int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
>>>>>> +       unsigned long page_order;
>>>>>> +};
>>>>>> +
>>>>>> +#define MAX_PGTABLE_LEVELS     4
>>>>>> +struct kvm_arch {
>>>>>> +       /* Guest physical mm */
>>>>>> +       kvm_pte_t *pgd;
>>>>>> +       unsigned long gpa_size;
>>>>>> +       unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
>>>>>> +       unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
>>>>>> +       unsigned int  root_level;
>>>>>> +
>>>>>> +       s64 time_offset;
>>>>>> +       struct kvm_context __percpu *vmcs;
>>>>>> +};
>>>>>> +
>>>>>> +#define CSR_MAX_NUMS           0x800
>>>>>> +
>>>>>> +struct loongarch_csrs {
>>>>>> +       unsigned long csrs[CSR_MAX_NUMS];
>>>>>> +};
>>>>>> +
>>>>>> +/* Resume Flags */
>>>>>> +#define RESUME_HOST            0
>>>>>> +#define RESUME_GUEST           1
>>>>>> +
>>>>>> +enum emulation_result {
>>>>>> +       EMULATE_DONE,           /* no further processing */
>>>>>> +       EMULATE_DO_MMIO,        /* kvm_run filled with MMIO request */
>>>>>> +       EMULATE_FAIL,           /* can't emulate this instruction */
>>>>>> +       EMULATE_EXCEPT,         /* A guest exception has been generated */
>>>>>> +       EMULATE_DO_IOCSR,       /* handle IOCSR request */
>>>>>> +};
>>>>>> +
>>>>>> +#define KVM_LARCH_FPU          (0x1 << 0)
>>>>>> +#define KVM_LARCH_CSR          (0x1 << 1)
>>>>>> +#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
>>>>>> +
>>>>>> +struct kvm_vcpu_arch {
>>>>>> +       /*
>>>>>> +        * Switch pointer-to-function type to unsigned long
>>>>>> +        * for loading the value into register directly.
>>>>>> +        */
>>>>>> +       unsigned long host_eentry;
>>>>>> +       unsigned long guest_eentry;
>>>>>> +
>>>>>> +       /* Pointers stored here for easy accessing from assembly code */
>>>>>> +       int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
>>>>>> +
>>>>>> +       /* Host registers preserved across guest mode execution */
>>>>>> +       unsigned long host_sp;
>>>>>> +       unsigned long host_tp;
>>>>>> +       unsigned long host_pgd;
>>>>>> +
>>>>>> +       /* Host CSRs are used when handling exits from guest */
>>>>>> +       unsigned long badi;
>>>>>> +       unsigned long badv;
>>>>>> +       unsigned long host_ecfg;
>>>>>> +       unsigned long host_estat;
>>>>>> +       unsigned long host_percpu;
>>>>>> +
>>>>>> +       /* GPRs */
>>>>>> +       unsigned long gprs[32];
>>>>>> +       unsigned long pc;
>>>>>> +
>>>>>> +       /* Which auxiliary state is loaded (KVM_LARCH_*) */
>>>>>> +       unsigned int aux_inuse;
>>>>>> +       /* FPU state */
>>>>>> +       struct loongarch_fpu fpu FPU_ALIGN;
>>>>>> +
>>>>>> +       /* CSR state */
>>>>>> +       struct loongarch_csrs *csr;
>>>>>> +
>>>>>> +       /* GPR used as IO source/target */
>>>>>> +       u32 io_gpr;
>>>>>> +
>>>>>> +       struct hrtimer swtimer;
>>>>>> +       /* KVM register to control count timer */
>>>>>> +       u32 count_ctl;
>>>>>> +
>>>>>> +       /* Bitmask of intr that are pending */
>>>>>> +       unsigned long irq_pending;
>>>>>> +       /* Bitmask of pending intr to be cleared */
>>>>>> +       unsigned long irq_clear;
>>>>>> +
>>>>>> +       /* Bitmask of exceptions that are pending */
>>>>>> +       unsigned long exception_pending;
>>>>>> +       unsigned int  subcode;
>>>>>> +
>>>>>> +       /* Cache for pages needed inside spinlock regions */
>>>>>> +       struct kvm_mmu_memory_cache mmu_page_cache;
>>>>>> +
>>>>>> +       /* vcpu's vpid */
>>>>>> +       u64 vpid;
>>>>>> +
>>>>>> +       /* Frequency of stable timer in Hz */
>>>>>> +       u64 timer_mhz;
>>>>>> +       ktime_t expire;
>>>>>> +
>>>>>> +       u64 core_ext_ioisr[4];
>>>>>> +
>>>>>> +       /* Last CPU the vCPU state was loaded on */
>>>>>> +       int last_sched_cpu;
>>>>>> +       /* mp state */
>>>>>> +       struct kvm_mp_state mp_state;
>>>>>> +       /* cpucfg */
>>>>>> +       u32 cpucfg[KVM_MAX_CPUCFG_REGS];
>>>>>> +};
>>>>>> +
>>>>>> +static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
>>>>>> +{
>>>>>> +       return csr->csrs[reg];
>>>>>> +}
>>>>>> +
>>>>>> +static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
>>>>>> +{
>>>>>> +       csr->csrs[reg] = val;
>>>>>> +}
>>>>>> +
>>>>>> +/* Helpers */
>>>>>> +static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
>>>>>> +{
>>>>>> +       return cpu_has_fpu;
>>>>>> +}
>>>>>> +
>>>>>> +void kvm_init_fault(void);
>>>>>> +
>>>>>> +/* Debug: dump vcpu state */
>>>>>> +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>>>>>> +
>>>>>> +/* MMU handling */
>>>>>> +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
>>>>>> +void kvm_flush_tlb_all(void);
>>>>>> +
>>>>>> +#define KVM_ARCH_WANT_MMU_NOTIFIER
>>>>>> +int kvm_unmap_hva_range(struct kvm *kvm,
>>>>>> +                       unsigned long start, unsigned long end, bool blockable);
>>>>>> +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
>>>>>> +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
>>>>>> +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
>>>>>> +
>>>>>> +static inline void update_pc(struct kvm_vcpu_arch *arch)
>>>>>> +{
>>>>>> +       arch->pc += 4;
>>>>>> +}
>>>>>> +
>>>>>> +/**
>>>>>> + * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
>>>>>> + * @vcpu:      Virtual CPU.
>>>>>> + *
>>>>>> + * Returns:    Whether the TLBL exception was likely due to an instruction
>>>>>> + *             fetch fault rather than a data load fault.
>>>>>> + */
>>>>>> +static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
>>>>>> +{
>>>>>> +       return arch->pc == arch->badv;
>>>>>> +}
>>>>>> +
>>>>>> +/* Misc */
>>>>>> +static inline void kvm_arch_hardware_unsetup(void) {}
>>>>>> +static inline void kvm_arch_sync_events(struct kvm *kvm) {}
>>>>>> +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
>>>>>> +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
>>>>>> +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
>>>>>> +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
>>>>>> +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
>>>>>> +static inline void kvm_arch_free_memslot(struct kvm *kvm,
>>>>>> +                                  struct kvm_memory_slot *slot) {}
>>>>>> +void kvm_check_vpid(struct kvm_vcpu *vcpu);
>>>>>> +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
>>>>>> +int kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
>>>>>> +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
>>>>>> +                                       const struct kvm_memory_slot *memslot);
>>>>>> +void kvm_init_vmcs(struct kvm *kvm);
>>>>>> +void kvm_vector_entry(void);
>>>>>> +int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
>>>>>> +extern const unsigned long kvm_vector_size;
>>>>>> +extern const unsigned long kvm_enter_guest_size;
>>>>>> +extern unsigned long vpid_mask;
>>>>>> +extern struct kvm_world_switch *kvm_loongarch_ops;
>>>>>> +
>>>>>> +#define SW_GCSR                (1 << 0)
>>>>>> +#define HW_GCSR                (1 << 1)
>>>>>> +#define INVALID_GCSR   (1 << 2)
>>>>>> +int get_gcsr_flag(int csr);
>>>>>> +extern void set_hw_gcsr(int csr_id, unsigned long val);
>>>>>> +#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */
>>>>>> diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h
>>>>>> new file mode 100644
>>>>>> index 0000000000..2fe1d4bdff
>>>>>> --- /dev/null
>>>>>> +++ b/arch/loongarch/include/asm/kvm_types.h
>>>>>> @@ -0,0 +1,11 @@
>>>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>>>> +/*
>>>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>>>> + */
>>>>>> +
>>>>>> +#ifndef _ASM_LOONGARCH_KVM_TYPES_H
>>>>>> +#define _ASM_LOONGARCH_KVM_TYPES_H
>>>>>> +
>>>>>> +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE      40
>>>>>> +
>>>>>> +#endif /* _ASM_LOONGARCH_KVM_TYPES_H */
>>>>>> diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
>>>>>> new file mode 100644
>>>>>> index 0000000000..fafda487d6
>>>>>> --- /dev/null
>>>>>> +++ b/arch/loongarch/include/uapi/asm/kvm.h
>>>>>> @@ -0,0 +1,108 @@
>>>>>> +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
>>>>>> +/*
>>>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>>>> + */
>>>>>> +
>>>>>> +#ifndef __UAPI_ASM_LOONGARCH_KVM_H
>>>>>> +#define __UAPI_ASM_LOONGARCH_KVM_H
>>>>>> +
>>>>>> +#include <linux/types.h>
>>>>>> +
>>>>>> +/*
>>>>>> + * KVM Loongarch specific structures and definitions.
>>>>>> + *
>>>>>> + * Some parts derived from the x86 version of this file.
>>>>>> + */
>>>>>> +
>>>>>> +#define __KVM_HAVE_READONLY_MEM
>>>>>> +
>>>>>> +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
>>>>>> +#define KVM_DIRTY_LOG_PAGE_OFFSET      64
>>>>>> +
>>>>>> +/*
>>>>>> + * for KVM_GET_REGS and KVM_SET_REGS
>>>>>> + */
>>>>>> +struct kvm_regs {
>>>>>> +       /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
>>>>>> +       __u64 gpr[32];
>>>>>> +       __u64 pc;
>>>>>> +};
>>>>>> +
>>>>>> +/*
>>>>>> + * for KVM_GET_FPU and KVM_SET_FPU
>>>>>> + */
>>>>>> +struct kvm_fpu {
>>>>>> +       __u32 fcsr;
>>>>>> +       __u64 fcc;    /* 8x8 */
>>>>>> +       struct kvm_fpureg {
>>>>>> +               __u64 val64[4];
>>>>>> +       } fpr[32];
>>>>>> +};
>>>>>> +
>>>>>> +/*
>>>>>> + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
>>>>>> + * registers.  The id field is broken down as follows:
>>>>>> + *
>>>>>> + *  bits[63..52] - As per linux/kvm.h
>>>>>> + *  bits[51..32] - Must be zero.
>>>>>> + *  bits[31..16] - Register set.
>>>>>> + *
>>>>>> + * Register set = 0: GP registers from kvm_regs (see definitions below).
>>>>>> + *
>>>>>> + * Register set = 1: CSR registers.
>>>>>> + *
>>>>>> + * Register set = 2: KVM specific registers (see definitions below).
>>>>>> + *
>>>>>> + * Register set = 3: FPU / SIMD registers (see definitions below).
>>>>>> + *
>>>>>> + * Other sets registers may be added in the future.  Each set would
>>>>>> + * have its own identifier in bits[31..16].
>>>>>> + */
>>>>>> +
>>>>>> +#define KVM_REG_LOONGARCH_GPR          (KVM_REG_LOONGARCH | 0x00000ULL)
>>>>>> +#define KVM_REG_LOONGARCH_CSR          (KVM_REG_LOONGARCH | 0x10000ULL)
>>>>>> +#define KVM_REG_LOONGARCH_KVM          (KVM_REG_LOONGARCH | 0x20000ULL)
>>>>>> +#define KVM_REG_LOONGARCH_FPU          (KVM_REG_LOONGARCH | 0x30000ULL)
>>>>> How about renaming it to KVM_REG_LOONGARCH_FPSIMD?
>>>>>
>>>>> Huacai
>>>> It would break the uapi used by user-space software and may cause an
>>>> incompatibility issue, so I think it is better to keep the original name.
>>> In your comments above it is not only FPU but FPU & SIMD, and this code
>>> hasn't been upstreamed yet, so how can it break the UAPI?
>> We want to apply this patch series to our other project when it is
>> upstream, so we need to update the previous code and it may break the
>> uapi. What do you think?
> Generally, the kernel is the first one to go upstream, so the kernel can
> do anything reasonable; other projects should align with the kernel when
> they want to go upstream.
>
> Huacai
Thanks for your advice. I understand your point above, and I think it
is better to rename it to KVM_REG_LOONGARCH_FPSIMD.
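
For illustration, the rename would then look roughly like this (a sketch
only, assuming the 0x30000ULL set encoding itself stays unchanged):

	#define KVM_REG_LOONGARCH_FPSIMD	(KVM_REG_LOONGARCH | 0x30000ULL)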

Thanks
Tianrui Zhao
>
>> Thanks
>> Tianrui Zhao
>>> Huacai
>>>
>>>> Thanks
>>>> Tianrui Zhao
>>>>>> +#define KVM_REG_LOONGARCH_CPUCFG       (KVM_REG_LOONGARCH | 0x40000ULL)
>>>>>> +#define KVM_REG_LOONGARCH_MASK         (KVM_REG_LOONGARCH | 0x70000ULL)
>>>>>> +#define KVM_CSR_IDX_MASK               0x7fff
>>>>>> +#define KVM_CPUCFG_IDX_MASK            0x7fff
>>>>>> +
>>>>>> +/*
>>>>>> + * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
>>>>>> + */
>>>>>> +
>>>>>> +#define KVM_REG_LOONGARCH_COUNTER      (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
>>>>>> +#define KVM_REG_LOONGARCH_VCPU_RESET   (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)
>>>>>> +
>>>>>> +#define LOONGARCH_REG_SHIFT            3
>>>>>> +#define LOONGARCH_REG_64(TYPE, REG)    (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
>>>>>> +#define KVM_IOC_CSRID(REG)             LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
>>>>>> +#define KVM_IOC_CPUCFG(REG)            LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
>>>>>> +
>>>>>> +struct kvm_debug_exit_arch {
>>>>>> +};
>>>>>> +
>>>>>> +/* for KVM_SET_GUEST_DEBUG */
>>>>>> +struct kvm_guest_debug_arch {
>>>>>> +};
>>>>>> +
>>>>>> +/* definition of registers in kvm_run */
>>>>>> +struct kvm_sync_regs {
>>>>>> +};
>>>>>> +
>>>>>> +/* dummy definition */
>>>>>> +struct kvm_sregs {
>>>>>> +};
>>>>>> +
>>>>>> +struct kvm_iocsr_entry {
>>>>>> +       __u32 addr;
>>>>>> +       __u32 pad;
>>>>>> +       __u64 data;
>>>>>> +};
>>>>>> +
>>>>>> +#define KVM_NR_IRQCHIPS                1
>>>>>> +#define KVM_IRQCHIP_NUM_PINS   64
>>>>>> +#define KVM_MAX_CORES          256
>>>>>> +
>>>>>> +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
>>>>>> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
>>>>>> index 13065dd961..863f84619a 100644
>>>>>> --- a/include/uapi/linux/kvm.h
>>>>>> +++ b/include/uapi/linux/kvm.h
>>>>>> @@ -264,6 +264,7 @@ struct kvm_xen_exit {
>>>>>>     #define KVM_EXIT_RISCV_SBI        35
>>>>>>     #define KVM_EXIT_RISCV_CSR        36
>>>>>>     #define KVM_EXIT_NOTIFY           37
>>>>>> +#define KVM_EXIT_LOONGARCH_IOCSR  38
>>>>>>
>>>>>>     /* For KVM_EXIT_INTERNAL_ERROR */
>>>>>>     /* Emulate instruction failed. */
>>>>>> @@ -336,6 +337,13 @@ struct kvm_run {
>>>>>>                            __u32 len;
>>>>>>                            __u8  is_write;
>>>>>>                    } mmio;
>>>>>> +               /* KVM_EXIT_LOONGARCH_IOCSR */
>>>>>> +               struct {
>>>>>> +                       __u64 phys_addr;
>>>>>> +                       __u8  data[8];
>>>>>> +                       __u32 len;
>>>>>> +                       __u8  is_write;
>>>>>> +               } iocsr_io;
>>>>>>                    /* KVM_EXIT_HYPERCALL */
>>>>>>                    struct {
>>>>>>                            __u64 nr;
>>>>>> @@ -1362,6 +1370,7 @@ struct kvm_dirty_tlb {
>>>>>>     #define KVM_REG_ARM64          0x6000000000000000ULL
>>>>>>     #define KVM_REG_MIPS           0x7000000000000000ULL
>>>>>>     #define KVM_REG_RISCV          0x8000000000000000ULL
>>>>>> +#define KVM_REG_LOONGARCH      0x9000000000000000ULL
>>>>>>
>>>>>>     #define KVM_REG_SIZE_SHIFT     52
>>>>>>     #define KVM_REG_SIZE_MASK      0x00f0000000000000ULL
>>>>>> --
>>>>>> 2.39.1
>>>>>>
  
zhaotianrui Sept. 19, 2023, 2:58 a.m. UTC | #10
On 2023/9/18 at 2:37 PM, Huacai Chen wrote:
> On Mon, Sep 18, 2023 at 2:32 PM zhaotianrui <zhaotianrui@loongson.cn> wrote:
>>
>> On 2023/9/17 at 10:22 PM, Huacai Chen wrote:
>>> Hi, Tianrui,
>>>
>>> On Fri, Sep 15, 2023 at 9:50 AM Tianrui Zhao <zhaotianrui@loongson.cn> wrote:
>>>> Add LoongArch KVM related header files, including kvm.h,
>>>> kvm_host.h, kvm_types.h. All of those are about LoongArch
>>>> virtualization features and kvm interfaces.
>>>>
>>>> Reviewed-by: Bibo Mao <maobibo@loongson.cn>
>>>> Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
>>>> ---
>>>>    arch/loongarch/include/asm/kvm_host.h  | 245 +++++++++++++++++++++++++
>>>>    arch/loongarch/include/asm/kvm_types.h |  11 ++
>>>>    arch/loongarch/include/uapi/asm/kvm.h  | 108 +++++++++++
>>>>    include/uapi/linux/kvm.h               |   9 +
>>>>    4 files changed, 373 insertions(+)
>>>>    create mode 100644 arch/loongarch/include/asm/kvm_host.h
>>>>    create mode 100644 arch/loongarch/include/asm/kvm_types.h
>>>>    create mode 100644 arch/loongarch/include/uapi/asm/kvm.h
>>>>
>>>> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
>>>> new file mode 100644
>>>> index 0000000000..00e0c1876b
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/include/asm/kvm_host.h
>>>> @@ -0,0 +1,245 @@
>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>> +/*
>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>> + */
>>>> +
>>>> +#ifndef __ASM_LOONGARCH_KVM_HOST_H__
>>>> +#define __ASM_LOONGARCH_KVM_HOST_H__
>>>> +
>>>> +#include <linux/cpumask.h>
>>>> +#include <linux/mutex.h>
>>>> +#include <linux/hrtimer.h>
>>>> +#include <linux/interrupt.h>
>>>> +#include <linux/types.h>
>>>> +#include <linux/kvm.h>
>>>> +#include <linux/kvm_types.h>
>>>> +#include <linux/threads.h>
>>>> +#include <linux/spinlock.h>
>>>> +
>>>> +#include <asm/inst.h>
>>>> +#include <asm/kvm_mmu.h>
>>>> +#include <asm/loongarch.h>
>>>> +
>>>> +/* Loongarch KVM register ids */
>>>> +#define KVM_GET_IOC_CSRIDX(id)         ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
>>>> +#define KVM_GET_IOC_CPUCFG_IDX(id)     ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
>>>> +
>>>> +#define KVM_MAX_VCPUS                  256
>>>> +#define KVM_MAX_CPUCFG_REGS            21
>>>> +/* memory slots that does not exposed to userspace */
>>>> +#define KVM_PRIVATE_MEM_SLOTS          0
>>>> +
>>>> +#define KVM_HALT_POLL_NS_DEFAULT       500000
>>>> +
>>>> +struct kvm_vm_stat {
>>>> +       struct kvm_vm_stat_generic generic;
>>>> +       u64 pages;
>>>> +       u64 hugepages;
>>>> +};
>>>> +
>>>> +struct kvm_vcpu_stat {
>>>> +       struct kvm_vcpu_stat_generic generic;
>>>> +       u64 idle_exits;
>>>> +       u64 signal_exits;
>>>> +       u64 int_exits;
>>>> +       u64 cpucfg_exits;
>>>> +};
>>>> +
>>>> +struct kvm_arch_memory_slot {
>>>> +};
>>>> +
>>>> +struct kvm_context {
>>>> +       unsigned long vpid_cache;
>>>> +       struct kvm_vcpu *last_vcpu;
>>>> +};
>>>> +
>>>> +struct kvm_world_switch {
>>>> +       int (*guest_eentry)(void);
>>>> +       int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
>>>> +       unsigned long page_order;
>>>> +};
>>>> +
>>>> +#define MAX_PGTABLE_LEVELS     4
>>>> +struct kvm_arch {
>>>> +       /* Guest physical mm */
>>>> +       kvm_pte_t *pgd;
>>>> +       unsigned long gpa_size;
>>>> +       unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
>>>> +       unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
>>>> +       unsigned int  root_level;
>>>> +
>>>> +       s64 time_offset;
>>>> +       struct kvm_context __percpu *vmcs;
>>>> +};
>>>> +
>>>> +#define CSR_MAX_NUMS           0x800
>>>> +
>>>> +struct loongarch_csrs {
>>>> +       unsigned long csrs[CSR_MAX_NUMS];
>>>> +};
>>>> +
>>>> +/* Resume Flags */
>>>> +#define RESUME_HOST            0
>>>> +#define RESUME_GUEST           1
>>>> +
>>>> +enum emulation_result {
>>>> +       EMULATE_DONE,           /* no further processing */
>>>> +       EMULATE_DO_MMIO,        /* kvm_run filled with MMIO request */
>>>> +       EMULATE_FAIL,           /* can't emulate this instruction */
>>>> +       EMULATE_EXCEPT,         /* A guest exception has been generated */
>>>> +       EMULATE_DO_IOCSR,       /* handle IOCSR request */
>>>> +};
>>>> +
>>>> +#define KVM_LARCH_FPU          (0x1 << 0)
>>>> +#define KVM_LARCH_CSR          (0x1 << 1)
>>>> +#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
>>>> +
>>>> +struct kvm_vcpu_arch {
>>>> +       /*
>>>> +        * Switch pointer-to-function type to unsigned long
>>>> +        * for loading the value into register directly.
>>>> +        */
>>>> +       unsigned long host_eentry;
>>>> +       unsigned long guest_eentry;
>>>> +
>>>> +       /* Pointers stored here for easy accessing from assembly code */
>>>> +       int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
>>>> +
>>>> +       /* Host registers preserved across guest mode execution */
>>>> +       unsigned long host_sp;
>>>> +       unsigned long host_tp;
>>>> +       unsigned long host_pgd;
>>>> +
>>>> +       /* Host CSRs are used when handling exits from guest */
>>>> +       unsigned long badi;
>>>> +       unsigned long badv;
>>>> +       unsigned long host_ecfg;
>>>> +       unsigned long host_estat;
>>>> +       unsigned long host_percpu;
>>>> +
>>>> +       /* GPRs */
>>>> +       unsigned long gprs[32];
>>>> +       unsigned long pc;
>>>> +
>>>> +       /* Which auxiliary state is loaded (KVM_LARCH_*) */
>>>> +       unsigned int aux_inuse;
>>>> +       /* FPU state */
>>>> +       struct loongarch_fpu fpu FPU_ALIGN;
>>>> +
>>>> +       /* CSR state */
>>>> +       struct loongarch_csrs *csr;
>>>> +
>>>> +       /* GPR used as IO source/target */
>>>> +       u32 io_gpr;
>>>> +
>>>> +       struct hrtimer swtimer;
>>>> +       /* KVM register to control count timer */
>>>> +       u32 count_ctl;
>>>> +
>>>> +       /* Bitmask of intr that are pending */
>>>> +       unsigned long irq_pending;
>>>> +       /* Bitmask of pending intr to be cleared */
>>>> +       unsigned long irq_clear;
>>>> +
>>>> +       /* Bitmask of exceptions that are pending */
>>>> +       unsigned long exception_pending;
>>>> +       unsigned int  subcode;
>>>> +
>>>> +       /* Cache for pages needed inside spinlock regions */
>>>> +       struct kvm_mmu_memory_cache mmu_page_cache;
>>>> +
>>>> +       /* vcpu's vpid */
>>>> +       u64 vpid;
>>>> +
>>>> +       /* Frequency of stable timer in Hz */
>>>> +       u64 timer_mhz;
>>>> +       ktime_t expire;
>>>> +
>>>> +       u64 core_ext_ioisr[4];
>>>> +
>>>> +       /* Last CPU the vCPU state was loaded on */
>>>> +       int last_sched_cpu;
>>>> +       /* mp state */
>>>> +       struct kvm_mp_state mp_state;
>>>> +       /* cpucfg */
>>>> +       u32 cpucfg[KVM_MAX_CPUCFG_REGS];
>>>> +};
>>>> +
>>>> +static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
>>>> +{
>>>> +       return csr->csrs[reg];
>>>> +}
>>>> +
>>>> +static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
>>>> +{
>>>> +       csr->csrs[reg] = val;
>>>> +}
>>>> +
>>>> +/* Helpers */
>>>> +static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
>>>> +{
>>>> +       return cpu_has_fpu;
>>>> +}
>>>> +
>>>> +void kvm_init_fault(void);
>>>> +
>>>> +/* Debug: dump vcpu state */
>>>> +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>>>> +
>>>> +/* MMU handling */
>>>> +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
>>>> +void kvm_flush_tlb_all(void);
>>>> +
>>>> +#define KVM_ARCH_WANT_MMU_NOTIFIER
>>>> +int kvm_unmap_hva_range(struct kvm *kvm,
>>>> +                       unsigned long start, unsigned long end, bool blockable);
>>>> +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
>>>> +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
>>>> +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
>>>> +
>>>> +static inline void update_pc(struct kvm_vcpu_arch *arch)
>>>> +{
>>>> +       arch->pc += 4;
>>>> +}
>>>> +
>>>> +/**
>>>> + * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
>>>> + * @vcpu:      Virtual CPU.
>>>> + *
>>>> + * Returns:    Whether the TLBL exception was likely due to an instruction
>>>> + *             fetch fault rather than a data load fault.
>>>> + */
>>>> +static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
>>>> +{
>>>> +       return arch->pc == arch->badv;
>>>> +}
>>>> +
>>>> +/* Misc */
>>>> +static inline void kvm_arch_hardware_unsetup(void) {}
>>>> +static inline void kvm_arch_sync_events(struct kvm *kvm) {}
>>>> +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
>>>> +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
>>>> +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
>>>> +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
>>>> +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
>>>> +static inline void kvm_arch_free_memslot(struct kvm *kvm,
>>>> +                                  struct kvm_memory_slot *slot) {}
>>>> +void kvm_check_vpid(struct kvm_vcpu *vcpu);
>>>> +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
>>>> +int kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
>>>> +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
>>>> +                                       const struct kvm_memory_slot *memslot);
>>>> +void kvm_init_vmcs(struct kvm *kvm);
>>>> +void kvm_vector_entry(void);
>>>> +int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
>>>> +extern const unsigned long kvm_vector_size;
>>>> +extern const unsigned long kvm_enter_guest_size;
>>>> +extern unsigned long vpid_mask;
>>>> +extern struct kvm_world_switch *kvm_loongarch_ops;
>>>> +
>>>> +#define SW_GCSR                (1 << 0)
>>>> +#define HW_GCSR                (1 << 1)
>>>> +#define INVALID_GCSR   (1 << 2)
>>>> +int get_gcsr_flag(int csr);
>>>> +extern void set_hw_gcsr(int csr_id, unsigned long val);
>>>> +#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */
>>>> diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h
>>>> new file mode 100644
>>>> index 0000000000..2fe1d4bdff
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/include/asm/kvm_types.h
>>>> @@ -0,0 +1,11 @@
>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>> +/*
>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>> + */
>>>> +
>>>> +#ifndef _ASM_LOONGARCH_KVM_TYPES_H
>>>> +#define _ASM_LOONGARCH_KVM_TYPES_H
>>>> +
>>>> +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE      40
>>>> +
>>>> +#endif /* _ASM_LOONGARCH_KVM_TYPES_H */
>>>> diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
>>>> new file mode 100644
>>>> index 0000000000..fafda487d6
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/include/uapi/asm/kvm.h
>>>> @@ -0,0 +1,108 @@
>>>> +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
>>>> +/*
>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>> + */
>>>> +
>>>> +#ifndef __UAPI_ASM_LOONGARCH_KVM_H
>>>> +#define __UAPI_ASM_LOONGARCH_KVM_H
>>>> +
>>>> +#include <linux/types.h>
>>>> +
>>>> +/*
>>>> + * KVM Loongarch specific structures and definitions.
>>>> + *
>>>> + * Some parts derived from the x86 version of this file.
>>>> + */
>>>> +
>>>> +#define __KVM_HAVE_READONLY_MEM
>>>> +
>>>> +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
>>>> +#define KVM_DIRTY_LOG_PAGE_OFFSET      64
>>>> +
>>>> +/*
>>>> + * for KVM_GET_REGS and KVM_SET_REGS
>>>> + */
>>>> +struct kvm_regs {
>>>> +       /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
>>>> +       __u64 gpr[32];
>>>> +       __u64 pc;
>>>> +};
>>>> +
>>>> +/*
>>>> + * for KVM_GET_FPU and KVM_SET_FPU
>>>> + */
>>>> +struct kvm_fpu {
>>>> +       __u32 fcsr;
>>>> +       __u64 fcc;    /* 8x8 */
>>>> +       struct kvm_fpureg {
>>>> +               __u64 val64[4];
>>>> +       } fpr[32];
>>>> +};
>>>> +
>>>> +/*
>>>> + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
>>>> + * registers.  The id field is broken down as follows:
>>>> + *
>>>> + *  bits[63..52] - As per linux/kvm.h
>>>> + *  bits[51..32] - Must be zero.
>>>> + *  bits[31..16] - Register set.
>>>> + *
>>>> + * Register set = 0: GP registers from kvm_regs (see definitions below).
>>>> + *
>>>> + * Register set = 1: CSR registers.
>>>> + *
>>>> + * Register set = 2: KVM specific registers (see definitions below).
>>>> + *
>>>> + * Register set = 3: FPU / SIMD registers (see definitions below).
>>>> + *
>>>> + * Other sets registers may be added in the future.  Each set would
>>>> + * have its own identifier in bits[31..16].
>>>> + */
>>>> +
>>>> +#define KVM_REG_LOONGARCH_GPR          (KVM_REG_LOONGARCH | 0x00000ULL)
>>>> +#define KVM_REG_LOONGARCH_CSR          (KVM_REG_LOONGARCH | 0x10000ULL)
>>>> +#define KVM_REG_LOONGARCH_KVM          (KVM_REG_LOONGARCH | 0x20000ULL)
>>>> +#define KVM_REG_LOONGARCH_FPU          (KVM_REG_LOONGARCH | 0x30000ULL)
>>>> +#define KVM_REG_LOONGARCH_CPUCFG       (KVM_REG_LOONGARCH | 0x40000ULL)
>>>> +#define KVM_REG_LOONGARCH_MASK         (KVM_REG_LOONGARCH | 0x70000ULL)
>>>> +#define KVM_CSR_IDX_MASK               0x7fff
>>>> +#define KVM_CPUCFG_IDX_MASK            0x7fff
>>>> +
>>>> +/*
>>>> + * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
>>>> + */
>>>> +
>>>> +#define KVM_REG_LOONGARCH_COUNTER      (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
>>>> +#define KVM_REG_LOONGARCH_VCPU_RESET   (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)
>>> Why begin with 3? 0, 1, 2 reserved for what?
>> They are kept consistent with our original code, and 0, 1, 2 are not used
>> now.
> The same as with KVM_REG_LOONGARCH_FPU: the kernel is the first to go
> upstream, so I suggest beginning with 0 or 1, not with 3.
>
> Huacai
Thanks for your advice. It is better to change the beginning to 0 and 1.
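
For reference, userspace consumes these ids directly through the generic
KVM_GET_ONE_REG / KVM_SET_ONE_REG ioctls, roughly as in the sketch below
(get_csr0() and vcpu_fd are illustrative names, not part of this series):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Read guest CSR index 0 via the ONE_REG interface (sketch). */
	static int get_csr0(int vcpu_fd, __u64 *val)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_IOC_CSRID(0),
			.addr = (__u64)(unsigned long)val,
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}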

Thanks
Tianrui Zhao
>
>> Thanks
>> Tianrui Zhao
>>> Huacai
>>>
>>>> +
>>>> +#define LOONGARCH_REG_SHIFT            3
>>>> +#define LOONGARCH_REG_64(TYPE, REG)    (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
>>>> +#define KVM_IOC_CSRID(REG)             LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
>>>> +#define KVM_IOC_CPUCFG(REG)            LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
>>>> +
>>>> +struct kvm_debug_exit_arch {
>>>> +};
>>>> +
>>>> +/* for KVM_SET_GUEST_DEBUG */
>>>> +struct kvm_guest_debug_arch {
>>>> +};
>>>> +
>>>> +/* definition of registers in kvm_run */
>>>> +struct kvm_sync_regs {
>>>> +};
>>>> +
>>>> +/* dummy definition */
>>>> +struct kvm_sregs {
>>>> +};
>>>> +
>>>> +struct kvm_iocsr_entry {
>>>> +       __u32 addr;
>>>> +       __u32 pad;
>>>> +       __u64 data;
>>>> +};
>>>> +
>>>> +#define KVM_NR_IRQCHIPS                1
>>>> +#define KVM_IRQCHIP_NUM_PINS   64
>>>> +#define KVM_MAX_CORES          256
>>>> +
>>>> +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
>>>> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
>>>> index 13065dd961..863f84619a 100644
>>>> --- a/include/uapi/linux/kvm.h
>>>> +++ b/include/uapi/linux/kvm.h
>>>> @@ -264,6 +264,7 @@ struct kvm_xen_exit {
>>>>    #define KVM_EXIT_RISCV_SBI        35
>>>>    #define KVM_EXIT_RISCV_CSR        36
>>>>    #define KVM_EXIT_NOTIFY           37
>>>> +#define KVM_EXIT_LOONGARCH_IOCSR  38
>>>>
>>>>    /* For KVM_EXIT_INTERNAL_ERROR */
>>>>    /* Emulate instruction failed. */
>>>> @@ -336,6 +337,13 @@ struct kvm_run {
>>>>                           __u32 len;
>>>>                           __u8  is_write;
>>>>                   } mmio;
>>>> +               /* KVM_EXIT_LOONGARCH_IOCSR */
>>>> +               struct {
>>>> +                       __u64 phys_addr;
>>>> +                       __u8  data[8];
>>>> +                       __u32 len;
>>>> +                       __u8  is_write;
>>>> +               } iocsr_io;
>>>>                   /* KVM_EXIT_HYPERCALL */
>>>>                   struct {
>>>>                           __u64 nr;
>>>> @@ -1362,6 +1370,7 @@ struct kvm_dirty_tlb {
>>>>    #define KVM_REG_ARM64          0x6000000000000000ULL
>>>>    #define KVM_REG_MIPS           0x7000000000000000ULL
>>>>    #define KVM_REG_RISCV          0x8000000000000000ULL
>>>> +#define KVM_REG_LOONGARCH      0x9000000000000000ULL
>>>>
>>>>    #define KVM_REG_SIZE_SHIFT     52
>>>>    #define KVM_REG_SIZE_MASK      0x00f0000000000000ULL
>>>> --
>>>> 2.39.1
>>>>
  

Patch

diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
new file mode 100644
index 0000000000..00e0c1876b
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -0,0 +1,245 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_HOST_H__
+#define __ASM_LOONGARCH_KVM_HOST_H__
+
+#include <linux/cpumask.h>
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+
+#include <asm/inst.h>
+#include <asm/kvm_mmu.h>
+#include <asm/loongarch.h>
+
+/* Loongarch KVM register ids */
+#define KVM_GET_IOC_CSRIDX(id)		((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
+#define KVM_GET_IOC_CPUCFG_IDX(id)	((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
+
+#define KVM_MAX_VCPUS			256
+#define KVM_MAX_CPUCFG_REGS		21
+/* memory slots that does not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS		0
+
+#define KVM_HALT_POLL_NS_DEFAULT	500000
+
+struct kvm_vm_stat {
+	struct kvm_vm_stat_generic generic;
+	u64 pages;
+	u64 hugepages;
+};
+
+struct kvm_vcpu_stat {
+	struct kvm_vcpu_stat_generic generic;
+	u64 idle_exits;
+	u64 signal_exits;
+	u64 int_exits;
+	u64 cpucfg_exits;
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_context {
+	unsigned long vpid_cache;
+	struct kvm_vcpu *last_vcpu;
+};
+
+struct kvm_world_switch {
+	int (*guest_eentry)(void);
+	int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	unsigned long page_order;
+};
+
+#define MAX_PGTABLE_LEVELS	4
+struct kvm_arch {
+	/* Guest physical mm */
+	kvm_pte_t *pgd;
+	unsigned long gpa_size;
+	unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
+	unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
+	unsigned int  root_level;
+
+	s64 time_offset;
+	struct kvm_context __percpu *vmcs;
+};
+
+#define CSR_MAX_NUMS		0x800
+
+struct loongarch_csrs {
+	unsigned long csrs[CSR_MAX_NUMS];
+};
+
+/* Resume Flags */
+#define RESUME_HOST		0
+#define RESUME_GUEST		1
+
+enum emulation_result {
+	EMULATE_DONE,		/* no further processing */
+	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
+	EMULATE_FAIL,		/* can't emulate this instruction */
+	EMULATE_EXCEPT,		/* A guest exception has been generated */
+	EMULATE_DO_IOCSR,	/* handle IOCSR request */
+};
+
+#define KVM_LARCH_FPU		(0x1 << 0)
+#define KVM_LARCH_CSR		(0x1 << 1)
+#define KVM_LARCH_HWCSR_USABLE	(0x1 << 2)
+
+struct kvm_vcpu_arch {
+	/*
+	 * Switch pointer-to-function type to unsigned long
+	 * for loading the value into register directly.
+	 */
+	unsigned long host_eentry;
+	unsigned long guest_eentry;
+
+	/* Pointers stored here for easy accessing from assembly code */
+	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+	/* Host registers preserved across guest mode execution */
+	unsigned long host_sp;
+	unsigned long host_tp;
+	unsigned long host_pgd;
+
+	/* Host CSRs are used when handling exits from guest */
+	unsigned long badi;
+	unsigned long badv;
+	unsigned long host_ecfg;
+	unsigned long host_estat;
+	unsigned long host_percpu;
+
+	/* GPRs */
+	unsigned long gprs[32];
+	unsigned long pc;
+
+	/* Which auxiliary state is loaded (KVM_LARCH_*) */
+	unsigned int aux_inuse;
+	/* FPU state */
+	struct loongarch_fpu fpu FPU_ALIGN;
+
+	/* CSR state */
+	struct loongarch_csrs *csr;
+
+	/* GPR used as IO source/target */
+	u32 io_gpr;
+
+	struct hrtimer swtimer;
+	/* KVM register to control count timer */
+	u32 count_ctl;
+
+	/* Bitmask of intr that are pending */
+	unsigned long irq_pending;
+	/* Bitmask of pending intr to be cleared */
+	unsigned long irq_clear;
+
+	/* Bitmask of exceptions that are pending */
+	unsigned long exception_pending;
+	unsigned int  subcode;
+
+	/* Cache for pages needed inside spinlock regions */
+	struct kvm_mmu_memory_cache mmu_page_cache;
+
+	/* vcpu's vpid */
+	u64 vpid;
+
+	/* Frequency of stable timer in Hz */
+	u64 timer_mhz;
+	ktime_t expire;
+
+	u64 core_ext_ioisr[4];
+
+	/* Last CPU the vCPU state was loaded on */
+	int last_sched_cpu;
+	/* mp state */
+	struct kvm_mp_state mp_state;
+	/* cpucfg */
+	u32 cpucfg[KVM_MAX_CPUCFG_REGS];
+};
+
+static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
+{
+	return csr->csrs[reg];
+}
+
+static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
+{
+	csr->csrs[reg] = val;
+}
+
+/* Helpers */
+static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
+{
+	return cpu_has_fpu;
+}
+
+void kvm_init_fault(void);
+
+/* Debug: dump vcpu state */
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
+
+/* MMU handling */
+int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
+void kvm_flush_tlb_all(void);
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_unmap_hva_range(struct kvm *kvm,
+			unsigned long start, unsigned long end, bool blockable);
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+
+static inline void update_pc(struct kvm_vcpu_arch *arch)
+{
+	arch->pc += 4;
+}
+
+/**
+ * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
+ * @vcpu:	Virtual CPU.
+ *
+ * Returns:	Whether the TLBL exception was likely due to an instruction
+ *		fetch fault rather than a data load fault.
+ */
+static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
+{
+	return arch->pc == arch->badv;
+}
+
+/* Misc */
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm,
+				   struct kvm_memory_slot *slot) {}
+void kvm_check_vpid(struct kvm_vcpu *vcpu);
+enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
+int kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+					const struct kvm_memory_slot *memslot);
+void kvm_init_vmcs(struct kvm *kvm);
+void kvm_vector_entry(void);
+int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern const unsigned long kvm_vector_size;
+extern const unsigned long kvm_enter_guest_size;
+extern unsigned long vpid_mask;
+extern struct kvm_world_switch *kvm_loongarch_ops;
+
+#define SW_GCSR		(1 << 0)
+#define HW_GCSR		(1 << 1)
+#define INVALID_GCSR	(1 << 2)
+int get_gcsr_flag(int csr);
+extern void set_hw_gcsr(int csr_id, unsigned long val);
+#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */
diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h
new file mode 100644
index 0000000000..2fe1d4bdff
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_types.h
@@ -0,0 +1,11 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_KVM_TYPES_H
+#define _ASM_LOONGARCH_KVM_TYPES_H
+
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE	40
+
+#endif /* _ASM_LOONGARCH_KVM_TYPES_H */
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
new file mode 100644
index 0000000000..fafda487d6
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/kvm.h
@@ -0,0 +1,108 @@ 
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __UAPI_ASM_LOONGARCH_KVM_H
+#define __UAPI_ASM_LOONGARCH_KVM_H
+
+#include <linux/types.h>
+
+/*
+ * KVM LoongArch specific structures and definitions.
+ *
+ * Some parts derived from the x86 version of this file.
+ */
+
+#define __KVM_HAVE_READONLY_MEM
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET	1
+#define KVM_DIRTY_LOG_PAGE_OFFSET	64
+
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ */
+struct kvm_regs {
+	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+	__u64 gpr[32];
+	__u64 pc;
+};
+
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ */
+struct kvm_fpu {
+	__u32 fcsr;
+	__u64 fcc;    /* 8x 8-bit condition code registers (fcc0-fcc7) packed in one u64 */
+	struct kvm_fpureg {
+		__u64 val64[4];
+	} fpr[32];
+};
+
+/*
+ * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
+ * registers.  The id field is broken down as follows:
+ *
+ *  bits[63..52] - As per linux/kvm.h
+ *  bits[51..32] - Must be zero.
+ *  bits[31..16] - Register set.
+ *
+ * Register set = 0: GP registers from kvm_regs (see definitions below).
+ *
+ * Register set = 1: CSR registers.
+ *
+ * Register set = 2: KVM specific registers (see definitions below).
+ *
+ * Register set = 3: FPU / SIMD registers (see definitions below).
+ *
+ * Register set = 4: CPUCFG registers (see definitions below).
+ *
+ * Other register sets may be added in the future.  Each set would
+ * have its own identifier in bits[31..16].
+ */
+
+#define KVM_REG_LOONGARCH_GPR		(KVM_REG_LOONGARCH | 0x00000ULL)
+#define KVM_REG_LOONGARCH_CSR		(KVM_REG_LOONGARCH | 0x10000ULL)
+#define KVM_REG_LOONGARCH_KVM		(KVM_REG_LOONGARCH | 0x20000ULL)
+#define KVM_REG_LOONGARCH_FPU		(KVM_REG_LOONGARCH | 0x30000ULL)
+#define KVM_REG_LOONGARCH_CPUCFG	(KVM_REG_LOONGARCH | 0x40000ULL)
+#define KVM_REG_LOONGARCH_MASK		(KVM_REG_LOONGARCH | 0x70000ULL)
+#define KVM_CSR_IDX_MASK		0x7fff
+#define KVM_CPUCFG_IDX_MASK		0x7fff
+
+/*
+ * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
+ */
+
+#define KVM_REG_LOONGARCH_COUNTER	(KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_LOONGARCH_VCPU_RESET	(KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)
+
+#define LOONGARCH_REG_SHIFT		3
+#define LOONGARCH_REG_64(TYPE, REG)	(TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
+#define KVM_IOC_CSRID(REG)		LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
+#define KVM_IOC_CPUCFG(REG)		LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/* dummy definition */
+struct kvm_sregs {
+};
+
+struct kvm_iocsr_entry {
+	__u32 addr;
+	__u32 pad;
+	__u64 data;
+};
+
+#define KVM_NR_IRQCHIPS		1
+#define KVM_IRQCHIP_NUM_PINS	64
+#define KVM_MAX_CORES		256
+
+#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
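
(Note, not part of the diff: a userspace sketch showing how the register-id
encoding above is meant to be consumed. struct kvm_one_reg and KVM_GET_ONE_REG
come from linux/kvm.h, which pulls in this asm/kvm.h on a LoongArch build;
demo_get_csr() and its vcpu_fd parameter are illustrative only.)

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Read one guest CSR; vcpu_fd is an already-created vCPU file descriptor. */
static int demo_get_csr(int vcpu_fd, int csr_idx, __u64 *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_IOC_CSRID(csr_idx),
		.addr = (__u64)(unsigned long)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}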
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 13065dd961..863f84619a 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -264,6 +264,7 @@ struct kvm_xen_exit {
 #define KVM_EXIT_RISCV_SBI        35
 #define KVM_EXIT_RISCV_CSR        36
 #define KVM_EXIT_NOTIFY           37
+#define KVM_EXIT_LOONGARCH_IOCSR  38
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -336,6 +337,13 @@ struct kvm_run {
 			__u32 len;
 			__u8  is_write;
 		} mmio;
+		/* KVM_EXIT_LOONGARCH_IOCSR */
+		struct {
+			__u64 phys_addr;
+			__u8  data[8];
+			__u32 len;
+			__u8  is_write;
+		} iocsr_io;
 		/* KVM_EXIT_HYPERCALL */
 		struct {
 			__u64 nr;
@@ -1362,6 +1370,7 @@ struct kvm_dirty_tlb {
 #define KVM_REG_ARM64		0x6000000000000000ULL
 #define KVM_REG_MIPS		0x7000000000000000ULL
 #define KVM_REG_RISCV		0x8000000000000000ULL
+#define KVM_REG_LOONGARCH	0x9000000000000000ULL
 
 #define KVM_REG_SIZE_SHIFT	52
 #define KVM_REG_SIZE_MASK	0x00f0000000000000ULL
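
(Note, not part of the diff: an illustrative VMM run-loop fragment for the new
exit reason. The layout is intended to mirror KVM_EXIT_MMIO: on a write,
data[] already holds the value the guest stored; on a read, userspace fills
data[] before the next KVM_RUN. vmm_iocsr_read()/vmm_iocsr_write() are
hypothetical VMM helpers, declared here only so the sketch is self-contained.)

#include <linux/kvm.h>

/* Hypothetical VMM helpers (declarations only, for illustration). */
void vmm_iocsr_write(__u64 addr, const void *data, __u32 len);
void vmm_iocsr_read(__u64 addr, void *data, __u32 len);

static void demo_handle_exit(struct kvm_run *run)
{
	switch (run->exit_reason) {
	case KVM_EXIT_LOONGARCH_IOCSR:
		if (run->iocsr_io.is_write)
			vmm_iocsr_write(run->iocsr_io.phys_addr,
					run->iocsr_io.data, run->iocsr_io.len);
		else
			vmm_iocsr_read(run->iocsr_io.phys_addr,
				       run->iocsr_io.data, run->iocsr_io.len);
		break;
	default:
		break;
	}
}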