--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12003,7 +12003,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.regs_avail = ~0;
vcpu->arch.regs_dirty = ~0;
- kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);
+ kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm);
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -2101,14 +2101,10 @@ void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);
- kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
- KVM_HOST_USES_PFN);
- kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
- KVM_HOST_USES_PFN);
- kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
- KVM_HOST_USES_PFN);
- kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
- KVM_HOST_USES_PFN);
+ kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm);
+ kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm);
+ kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm);
+ kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm);
}
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
@@ -2151,7 +2147,7 @@ void kvm_xen_init_vm(struct kvm *kvm)
{
mutex_init(&kvm->arch.xen.xen_lock);
idr_init(&kvm->arch.xen.evtchn_ports);
- kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
+ kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm);
}
void kvm_xen_destroy_vm(struct kvm *kvm)
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1320,21 +1320,12 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
*
* @gpc: struct gfn_to_pfn_cache object.
* @kvm: pointer to kvm instance.
- * @vcpu: vCPU to be used for marking pages dirty and to be woken on
- * invalidation.
- * @usage: indicates if the resulting host physical PFN is used while
- * the @vcpu is IN_GUEST_MODE (in which case invalidation of
- * the cache from MMU notifiers---but not for KVM memslot
- * changes!---will also force @vcpu to exit the guest and
- * refresh the cache); and/or if the PFN used directly
- * by KVM (and thus needs a kernel virtual mapping).
*
* This sets up a gfn_to_pfn_cache by initializing locks and assigning the
* immutable attributes. Note, the cache must be zero-allocated (or zeroed by
* the caller before init).
*/
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
- struct kvm_vcpu *vcpu, enum pfn_cache_usage usage);
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
/**
* kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
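For illustration only (not part of this patch): with the @vcpu and @usage parameters gone, every cache is host-mapped, and a caller follows the usual init/activate/check/refresh pattern. The kvm_gpc_init(), kvm_gpc_check() and kvm_gpc_refresh() signatures below are taken from this diff; kvm_gpc_activate(gpc, gpa, len) and the gpc->lock/khva access pattern follow existing callers and are assumptions here.

#include <linux/kvm_host.h>

/*
 * Illustrative sketch -- not part of this patch.  The cache must be
 * zero-allocated before kvm_gpc_init() (a file-scope static is zeroed).
 */
static struct gfn_to_pfn_cache cache;

static int example_setup(struct kvm *kvm, gpa_t gpa)
{
	kvm_gpc_init(&cache, kvm);		/* no vcpu/usage arguments any more */
	return kvm_gpc_activate(&cache, gpa, sizeof(u64));	/* assumed signature */
}

static int example_read(u64 *out)
{
	unsigned long flags;

	read_lock_irqsave(&cache.lock, flags);
	while (!kvm_gpc_check(&cache, sizeof(*out))) {
		read_unlock_irqrestore(&cache.lock, flags);
		if (kvm_gpc_refresh(&cache, sizeof(*out)))
			return -EFAULT;		/* refresh failed */
		read_lock_irqsave(&cache.lock, flags);
	}
	*out = *(u64 *)cache.khva;		/* read through the kernel mapping */
	read_unlock_irqrestore(&cache.lock, flags);
	return 0;
}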
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -49,12 +49,6 @@ typedef u64 hfn_t;
typedef hfn_t kvm_pfn_t;
-enum pfn_cache_usage {
- KVM_GUEST_USES_PFN = BIT(0),
- KVM_HOST_USES_PFN = BIT(1),
- KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
-};
-
struct gfn_to_hva_cache {
u64 generation;
gpa_t gpa;
@@ -69,13 +63,11 @@ struct gfn_to_pfn_cache {
unsigned long uhva;
struct kvm_memory_slot *memslot;
struct kvm *kvm;
- struct kvm_vcpu *vcpu;
struct list_head list;
rwlock_t lock;
struct mutex refresh_lock;
void *khva;
kvm_pfn_t pfn;
- enum pfn_cache_usage usage;
bool active;
bool valid;
};
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -25,9 +25,7 @@
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
unsigned long end, bool may_block)
{
- DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
struct gfn_to_pfn_cache *gpc;
- bool evict_vcpus = false;
spin_lock(&kvm->gpc_lock);
list_for_each_entry(gpc, &kvm->gpc_list, list) {
@@ -37,43 +35,10 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
gpc->uhva >= start && gpc->uhva < end) {
gpc->valid = false;
-
- /*
- * If a guest vCPU could be using the physical address,
- * it needs to be forced out of guest mode.
- */
- if (gpc->usage & KVM_GUEST_USES_PFN) {
- if (!evict_vcpus) {
- evict_vcpus = true;
- bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
- }
- __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
- }
}
write_unlock_irq(&gpc->lock);
}
spin_unlock(&kvm->gpc_lock);
-
- if (evict_vcpus) {
- /*
- * KVM needs to ensure the vCPU is fully out of guest context
- * before allowing the invalidation to continue.
- */
- unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
- bool called;
-
- /*
- * If the OOM reaper is active, then all vCPUs should have
- * been stopped already, so perform the request without
- * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
- */
- if (!may_block)
- req &= ~KVM_REQUEST_WAIT;
-
- called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);
-
- WARN_ON_ONCE(called && !may_block);
- }
}
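Net effect of this hunk, consolidated for readability (a reconstruction from the removals above; context between the hunks is only summarized in a comment, not asserted): with no KVM_GUEST_USES_PFN users left there is no vCPU to force out of guest mode, so invalidation just marks overlapping caches invalid under their locks, and may_block appears to be left unused in this function.

/*
 * Post-patch shape of gfn_to_pfn_cache_invalidate_start(), reconstructed
 * from the hunks above.  The "..." comment stands in for code that is not
 * shown in (and not changed by) this diff.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end, bool may_block)
{
	struct gfn_to_pfn_cache *gpc;

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		/* ... unchanged: take gpc->lock for write ... */
		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
		    gpc->uhva >= start && gpc->uhva < end) {
			gpc->valid = false;
		}
		write_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);
}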
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
@@ -206,16 +171,14 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
* pfn. Note, kmap() and memremap() can both sleep, so this
* too must be done outside of gpc->lock!
*/
- if (gpc->usage & KVM_HOST_USES_PFN) {
- if (new_pfn == gpc->pfn)
- new_khva = old_khva;
- else
- new_khva = gpc_map(new_pfn);
-
- if (!new_khva) {
- kvm_release_pfn_clean(new_pfn);
- goto out_error;
- }
+ if (new_pfn == gpc->pfn)
+ new_khva = old_khva;
+ else
+ new_khva = gpc_map(new_pfn);
+
+ if (!new_khva) {
+ kvm_release_pfn_clean(new_pfn);
+ goto out_error;
}
write_lock_irq(&gpc->lock);
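For context on the sleep requirement mentioned in the comment above: a mapping helper along these lines is what gpc_map() plausibly does (an illustrative sketch, not taken from this patch; the real helper is a pfncache.c internal), which is why the mapping must be established before re-taking gpc->lock.

/*
 * Illustrative sketch (assumption, not from this patch): mapping the new
 * pfn needs kmap() for struct-page-backed memory or memremap() otherwise,
 * and both can sleep.
 */
static void *gpc_map_sketch(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return kmap(pfn_to_page(pfn));			/* may sleep */
#ifdef CONFIG_HAS_IOMEM
	return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); /* may sleep */
#else
	return NULL;
#endif
}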
@@ -346,18 +309,12 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
return __kvm_gpc_refresh(gpc, gpc->gpa, len);
}
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
- struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
{
- WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
- WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);
-
rwlock_init(&gpc->lock);
mutex_init(&gpc->refresh_lock);
gpc->kvm = kvm;
- gpc->vcpu = vcpu;
- gpc->usage = usage;
gpc->pfn = KVM_PFN_ERR_FAULT;
gpc->uhva = KVM_HVA_ERR_BAD;
}