In preparation for refactoring how we store the actual FP state into a
single struct, let's free up the name 'fp_state', which we currently use
for the variable that tracks ownership of the FP registers, by renaming
it to something a bit more specific to its usage. While we're at it,
also move the enum definition next to the rest of the FP state.
No functional changes.
Signed-off-by: Mark Brown <broonie@kernel.org>
---
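Illustration only, not part of the change itself: after the rename,
ownership checks read as in this helper, copied from the hyp/switch.h
hunk below:

	/* Check whether the FP regs are owned by the guest */
	static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.fp_owner == FP_STATE_GUEST_OWNED;
	}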
 arch/arm64/include/asm/kvm_emulate.h    |  4 ++--
 arch/arm64/include/asm/kvm_host.h       | 14 +++++++-------
 arch/arm64/kvm/arm.c                    |  2 +-
 arch/arm64/kvm/fpsimd.c                 | 10 +++++-----
 arch/arm64/kvm/hyp/include/hyp/switch.h |  6 +++---
 arch/arm64/kvm/hyp/nvhe/hyp-main.c      |  4 ++--
 arch/arm64/kvm/hyp/nvhe/switch.c        |  2 +-
 arch/arm64/kvm/hyp/vhe/switch.c         |  2 +-
 8 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -593,7 +593,7 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
if (!vcpu_has_sve(vcpu) ||
- (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
+ (vcpu->arch.fp_owner != FP_STATE_GUEST_OWNED))
val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
@@ -601,7 +601,7 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
val = CPTR_NVHE_EL2_RES1;
if (vcpu_has_sve(vcpu) &&
- (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
+ (vcpu->arch.fp_owner == FP_STATE_GUEST_OWNED))
val |= CPTR_EL2_TZ;
if (cpus_have_final_cap(ARM64_SME))
val &= ~CPTR_EL2_TSM;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -544,6 +544,13 @@ struct kvm_vcpu_arch {
unsigned int sve_max_vl;
u64 svcr;
+ /* Ownership of the FP regs */
+ enum {
+ FP_STATE_FREE,
+ FP_STATE_HOST_OWNED,
+ FP_STATE_GUEST_OWNED,
+ } fp_owner;
+
/* Stage 2 paging state used by the hardware on next switch */
struct kvm_s2_mmu *hw_mmu;
@@ -558,13 +565,6 @@ struct kvm_vcpu_arch {
/* Exception Information */
struct kvm_vcpu_fault_info fault;
- /* Ownership of the FP regs */
- enum {
- FP_STATE_FREE,
- FP_STATE_HOST_OWNED,
- FP_STATE_GUEST_OWNED,
- } fp_state;
-
/* Configuration flags, set once and for all before the vcpu can run */
u8 cflags;
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -377,7 +377,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
* Default value for the FP state, will be overloaded at load
* time if we support FP (pretty likely)
*/
- vcpu->arch.fp_state = FP_STATE_FREE;
+ vcpu->arch.fp_owner = FP_STATE_FREE;
/* Set up the timer */
kvm_timer_vcpu_init(vcpu);
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -86,7 +86,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
* guest in kvm_arch_vcpu_ctxflush_fp() and override this to
* FP_STATE_FREE if the flag set.
*/
- vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
+ vcpu->arch.fp_owner = FP_STATE_HOST_OWNED;
vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
@@ -110,7 +110,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
* been saved, this is very unlikely to happen.
*/
if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
- vcpu->arch.fp_state = FP_STATE_FREE;
+ vcpu->arch.fp_owner = FP_STATE_FREE;
fpsimd_save_and_flush_cpu_state();
}
}
@@ -126,7 +126,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
- vcpu->arch.fp_state = FP_STATE_FREE;
+ vcpu->arch.fp_owner = FP_STATE_FREE;
}
/*
@@ -142,7 +142,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(!irqs_disabled());
- if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
+ if (vcpu->arch.fp_owner == FP_STATE_GUEST_OWNED) {
/*
* Currently we do not support SME guests so SVCR is
@@ -195,7 +195,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
isb();
}
- if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
+ if (vcpu->arch.fp_owner == FP_STATE_GUEST_OWNED) {
if (vcpu_has_sve(vcpu)) {
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -42,7 +42,7 @@ extern struct kvm_exception_table_entry __stop___kvm_ex_table;
/* Check whether the FP regs are owned by the guest */
static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED;
+ return vcpu->arch.fp_owner == FP_STATE_GUEST_OWNED;
}
/* Save the 32-bit only FPSIMD system register state */
@@ -370,7 +370,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
isb();
/* Write out the host state if it's in the registers */
- if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
+ if (vcpu->arch.fp_owner == FP_STATE_HOST_OWNED)
__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
/* Restore the guest state */
@@ -383,7 +383,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
if (!(read_sysreg(hcr_el2) & HCR_RW))
write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
- vcpu->arch.fp_state = FP_STATE_GUEST_OWNED;
+ vcpu->arch.fp_owner = FP_STATE_GUEST_OWNED;
return true;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -39,7 +39,7 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
- hyp_vcpu->vcpu.arch.fp_state = host_vcpu->arch.fp_state;
+ hyp_vcpu->vcpu.arch.fp_owner = host_vcpu->arch.fp_owner;
hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);
hyp_vcpu->vcpu.arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state;
@@ -64,7 +64,7 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
- host_vcpu->arch.fp_state = hyp_vcpu->vcpu.arch.fp_state;
+ host_vcpu->arch.fp_owner = hyp_vcpu->vcpu.arch.fp_owner;
host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -337,7 +337,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__sysreg_restore_state_nvhe(host_ctxt);
- if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
+ if (vcpu->arch.fp_owner == FP_STATE_GUEST_OWNED)
__fpsimd_save_fpexc32(vcpu);
__debug_switch_to_host(vcpu);
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -258,7 +258,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_restore_host_state_vhe(host_ctxt);
- if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
+ if (vcpu->arch.fp_owner == FP_STATE_GUEST_OWNED)
__fpsimd_save_fpexc32(vcpu);
__debug_switch_to_host(vcpu);