[v12,24/31] LoongArch: KVM: Implement handle mmio exception
Commit Message
Implement handling of MMIO exceptions: fill the MMIO info into vcpu->run
and return to user space to handle it.
Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
---
arch/loongarch/kvm/exit.c | 308 ++++++++++++++++++++++++++++++++++++++
1 file changed, 308 insertions(+)
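
For context on the user-space contract introduced here, below is a minimal sketch of a VMM run loop that consumes these exits. It assumes the vcpu's kvm_run region is already mmap()ed; handle_mmio_read() and handle_mmio_write() are hypothetical device-model callbacks, not part of the KVM API.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical device-model callbacks supplied by the VMM. */
extern void handle_mmio_read(__u64 addr, void *data, __u32 len);
extern void handle_mmio_write(__u64 addr, const void *data, __u32 len);

static void vcpu_run_loop(int vcpu_fd, struct kvm_run *run)
{
	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
		if (run->exit_reason != KVM_EXIT_MMIO)
			break;	/* other exit reasons omitted in this sketch */
		if (run->mmio.is_write)
			/* run->mmio.data was filled by _kvm_emu_mmio_write() */
			handle_mmio_write(run->mmio.phys_addr,
					  run->mmio.data, run->mmio.len);
		else
			/*
			 * Data stored here is copied into the guest GPR by
			 * _kvm_complete_mmio_read() on the next KVM_RUN.
			 */
			handle_mmio_read(run->mmio.phys_addr,
					 run->mmio.data, run->mmio.len);
	}
}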
Comments
Reviewed-by: Bibo, Mao <maobibo@loongson.cn>
On 2023/5/30 09:52, Tianrui Zhao wrote:
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index 26283a9d3c6d..1237ceb06d0c 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -209,6 +209,265 @@ int _kvm_emu_idle(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
+{
+ struct kvm_run *run = vcpu->run;
+ unsigned int rd, op8, opcode;
+ unsigned long rd_val = 0;
+ void *data = run->mmio.data;
+ unsigned long curr_pc;
+ int ret;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ update_pc(&vcpu->arch);
+
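+ /*
+ * The top byte of the instruction word selects the encoding: values
+ * below 0x28 are the reg2i14 stptr.w/d forms, 0x28-0x2f the reg2i12
+ * st.b/h/w/d forms, and 0x38 the reg3 indexed stx.* forms.
+ */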
+ op8 = (inst.word >> 24) & 0xff;
+ run->mmio.phys_addr = vcpu->arch.badv;
+ ret = EMULATE_DO_MMIO;
+ if (op8 < 0x28) {
+ /* stptr.w/d process */
+ rd = inst.reg2i14_format.rd;
+ opcode = inst.reg2i14_format.opcode;
+
+ switch (opcode) {
+ case stptrd_op:
+ run->mmio.len = 8;
+ *(unsigned long *)data = vcpu->arch.gprs[rd];
+ break;
+ case stptrw_op:
+ run->mmio.len = 4;
+ *(unsigned int *)data = vcpu->arch.gprs[rd];
+ break;
+ default:
+ ret = EMULATE_FAIL;
+ break;
+ }
+ } else if (op8 < 0x30) {
+ /* st.b/h/w/d process */
+ rd = inst.reg2i12_format.rd;
+ opcode = inst.reg2i12_format.opcode;
+ rd_val = vcpu->arch.gprs[rd];
+
+ switch (opcode) {
+ case std_op:
+ run->mmio.len = 8;
+ *(unsigned long *)data = rd_val;
+ break;
+ case stw_op:
+ run->mmio.len = 4;
+ *(unsigned int *)data = rd_val;
+ break;
+ case sth_op:
+ run->mmio.len = 2;
+ *(unsigned short *)data = rd_val;
+ break;
+ case stb_op:
+ run->mmio.len = 1;
+ *(unsigned char *)data = rd_val;
+ break;
+ default:
+ ret = EMULATE_FAIL;
+ break;
+ }
+ } else if (op8 == 0x38) {
+ /* stxb/h/w/d process */
+ rd = inst.reg3_format.rd;
+ opcode = inst.reg3_format.opcode;
+
+ switch (opcode) {
+ case stxb_op:
+ run->mmio.len = 1;
+ *(unsigned char *)data = vcpu->arch.gprs[rd];
+ break;
+ case stxh_op:
+ run->mmio.len = 2;
+ *(unsigned short *)data = vcpu->arch.gprs[rd];
+ break;
+ case stxw_op:
+ run->mmio.len = 4;
+ *(unsigned int *)data = vcpu->arch.gprs[rd];
+ break;
+ case stxd_op:
+ run->mmio.len = 8;
+ *(unsigned long *)data = vcpu->arch.gprs[rd];
+ break;
+ default:
+ ret = EMULATE_FAIL;
+ break;
+ }
+ } else {
+ ret = EMULATE_FAIL;
+ }
+
+ if (ret == EMULATE_DO_MMIO) {
+ run->mmio.is_write = 1;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 1;
+ } else {
+ /* Rollback PC if emulation was unsuccessful */
+ vcpu->arch.pc = curr_pc;
+ kvm_err("Write not supported inst=0x%08x @%lx BadVaddr:%#lx\n",
+ inst.word, vcpu->arch.pc, vcpu->arch.badv);
+ kvm_arch_vcpu_dump_regs(vcpu);
+ }
+
+ return ret;
+}
+
+int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
+{
+ unsigned int op8, opcode, rd;
+ struct kvm_run *run = vcpu->run;
+ int ret;
+
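+ /*
+ * Unlike the write path, the PC is not advanced here: update_pc()
+ * is called from _kvm_complete_mmio_read() once user space has
+ * supplied the data.
+ */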
+ run->mmio.phys_addr = vcpu->arch.badv;
+ vcpu->mmio_needed = 2; /* signed */
+ op8 = (inst.word >> 24) & 0xff;
+ ret = EMULATE_DO_MMIO;
+
+ if (op8 < 0x28) {
+ /* ldptr.w/d process */
+ rd = inst.reg2i14_format.rd;
+ opcode = inst.reg2i14_format.opcode;
+
+ switch (opcode) {
+ case ldptrd_op:
+ run->mmio.len = 8;
+ break;
+ case ldptrw_op:
+ run->mmio.len = 4;
+ break;
+ default:
+ ret = EMULATE_FAIL;
+ break;
+ }
+ } else if (op8 < 0x2f) {
+ /* ld.b/h/w/d, ld.bu/hu/wu process */
+ rd = inst.reg2i12_format.rd;
+ opcode = inst.reg2i12_format.opcode;
+
+ switch (opcode) {
+ case ldd_op:
+ run->mmio.len = 8;
+ break;
+ case ldwu_op:
+ vcpu->mmio_needed = 1; /* unsigned */
+ run->mmio.len = 4;
+ break;
+ case ldw_op:
+ run->mmio.len = 4;
+ break;
+ case ldhu_op:
+ vcpu->mmio_needed = 1; /* unsigned */
+ run->mmio.len = 2;
+ break;
+ case ldh_op:
+ run->mmio.len = 2;
+ break;
+ case ldbu_op:
+ vcpu->mmio_needed = 1; /* unsigned */
+ run->mmio.len = 1;
+ break;
+ case ldb_op:
+ run->mmio.len = 1;
+ break;
+ default:
+ ret = EMULATE_FAIL;
+ break;
+ }
+ } else if (op8 == 0x38) {
+ /* ldxb/h/w/d, ldxb/h/wu, ldgtb/h/w/d, ldleb/h/w/d process */
+ rd = inst.reg3_format.rd;
+ opcode = inst.reg3_format.opcode;
+
+ switch (opcode) {
+ case ldxb_op:
+ run->mmio.len = 1;
+ break;
+ case ldxbu_op:
+ run->mmio.len = 1;
+ vcpu->mmio_needed = 1; /* unsigned */
+ break;
+ case ldxh_op:
+ run->mmio.len = 2;
+ break;
+ case ldxhu_op:
+ run->mmio.len = 2;
+ vcpu->mmio_needed = 1; /* unsigned */
+ break;
+ case ldxw_op:
+ run->mmio.len = 4;
+ break;
+ case ldxwu_op:
+ run->mmio.len = 4;
+ vcpu->mmio_needed = 1; /* unsigned */
+ break;
+ case ldxd_op:
+ run->mmio.len = 8;
+ break;
+ default:
+ ret = EMULATE_FAIL;
+ break;
+ }
+ } else {
+ ret = EMULATE_FAIL;
+ }
+
+ if (ret == EMULATE_DO_MMIO) {
+ /* Set for _kvm_complete_mmio_read use */
+ vcpu->arch.io_gpr = rd;
+ run->mmio.is_write = 0;
+ vcpu->mmio_is_write = 0;
+ } else {
+ kvm_err("Load not supporded inst=0x%08x @%lx BadVaddr:%#lx\n",
+ inst.word, vcpu->arch.pc, vcpu->arch.badv);
+ kvm_arch_vcpu_dump_regs(vcpu);
+ vcpu->mmio_needed = 0;
+ }
+ return ret;
+}
+
+int _kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+ enum emulation_result er = EMULATE_DONE;
+
+ /* Advance the PC now that the load has completed */
+ update_pc(&vcpu->arch);
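+ /* mmio_needed == 2 marks a signed load; sign-extend narrow values */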
+ switch (run->mmio.len) {
+ case 8:
+ *gpr = *(s64 *)run->mmio.data;
+ break;
+ case 4:
+ if (vcpu->mmio_needed == 2)
+ *gpr = *(int *)run->mmio.data;
+ else
+ *gpr = *(unsigned int *)run->mmio.data;
+ break;
+ case 2:
+ if (vcpu->mmio_needed == 2)
+ *gpr = *(short *)run->mmio.data;
+ else
+ *gpr = *(unsigned short *)run->mmio.data;
+ break;
+ case 1:
+ if (vcpu->mmio_needed == 2)
+ *gpr = *(char *)run->mmio.data;
+ else
+ *gpr = *(unsigned char *)run->mmio.data;
+ break;
+ default:
+ kvm_err("Bad MMIO length: %d,addr is 0x%lx",
+ run->mmio.len, vcpu->arch.badv);
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ return er;
+}
+
static int _kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
@@ -323,3 +582,52 @@ static int _kvm_handle_gspr(struct kvm_vcpu *vcpu)
}
return ret;
}
+
+static int _kvm_handle_mmu_fault(struct kvm_vcpu *vcpu, bool write)
+{
+ struct kvm_run *run = vcpu->run;
+ unsigned long badv = vcpu->arch.badv;
+ larch_inst inst;
+ enum emulation_result er = EMULATE_DONE;
+ int ret;
+
+ ret = kvm_handle_mm_fault(vcpu, badv, write);
+ if (ret) {
+ /* Treat as MMIO */
+ inst.word = vcpu->arch.badi;
+ if (write) {
+ er = _kvm_emu_mmio_write(vcpu, inst);
+ } else {
+ /* A code fetch fault doesn't count as an MMIO */
+ if (kvm_is_ifetch_fault(&vcpu->arch)) {
+ kvm_err("%s ifetch error addr:%lx\n", __func__, badv);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+
+ er = _kvm_emu_mmio_read(vcpu, inst);
+ }
+ }
+
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else if (er == EMULATE_DO_MMIO) {
+ run->exit_reason = KVM_EXIT_MMIO;
+ ret = RESUME_HOST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+
+ return ret;
+}
+
+static int _kvm_handle_write_fault(struct kvm_vcpu *vcpu)
+{
+ return _kvm_handle_mmu_fault(vcpu, true);
+}
+
+static int _kvm_handle_read_fault(struct kvm_vcpu *vcpu)
+{
+ return _kvm_handle_mmu_fault(vcpu, false);
+}
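
The two wrappers above are static and not referenced by this patch itself; they are presumably installed in the per-exccode exit handler table elsewhere in this series. A hypothetical sketch of that wiring, with the typedef and table name assumed for illustration (only EXCCODE_TLBL/EXCCODE_TLBS are real LoongArch exception codes):

/* Assumed typedef; the real one is introduced elsewhere in the series. */
typedef int (*exit_handle_fn)(struct kvm_vcpu *vcpu);

static exit_handle_fn kvm_fault_tables[] = {
	[EXCCODE_TLBL] = _kvm_handle_read_fault,	/* page fault on load */
	[EXCCODE_TLBS] = _kvm_handle_write_fault,	/* page fault on store */
};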