@@ -3033,10 +3033,10 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
WARN_ON_ONCE(gpc->len != offset + sizeof(*guest_hv_clock));
read_lock_irqsave(&gpc->lock, flags);
- while (!kvm_gpc_check(gpc, gpc->gpa)) {
+ while (!kvm_gpc_check(gpc)) {
read_unlock_irqrestore(&gpc->lock, flags);
- if (kvm_gpc_refresh(gpc, gpc->gpa))
+ if (kvm_gpc_refresh(gpc))
return;
read_lock_irqsave(&gpc->lock, flags);
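Every call site converted in this patch follows the same consume-side idiom, so it is worth sketching once. A minimal sketch of the loop as it reads after the change, with the body's access through gpc->khva (elided from the hunks) assumed:

        read_lock_irqsave(&gpc->lock, flags);
        while (!kvm_gpc_check(gpc)) {
                read_unlock_irqrestore(&gpc->lock, flags);

                /* Refresh can fault pages in and therefore sleep. */
                if (kvm_gpc_refresh(gpc))
                        return;

                read_lock_irqsave(&gpc->lock, flags);
        }

        /* ... access guest memory through gpc->khva ... */

        read_unlock_irqrestore(&gpc->lock, flags);

The runstate and interrupt paths below add a guard ahead of the refresh (RUNSTATE_runnable, in_atomic()) precisely because the refresh may sleep.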
@@ -211,14 +211,14 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
return;
read_lock_irqsave(&gpc->lock, flags);
- while (!kvm_gpc_check(gpc, gpc->gpa)) {
+ while (!kvm_gpc_check(gpc)) {
read_unlock_irqrestore(&gpc->lock, flags);
/* When invoked from kvm_sched_out() we cannot sleep */
if (state == RUNSTATE_runnable)
return;
- if (kvm_gpc_refresh(gpc, gpc->gpa))
+ if (kvm_gpc_refresh(gpc))
return;
read_lock_irqsave(&gpc->lock, flags);
@@ -344,10 +344,10 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
* little more honest about it.
*/
read_lock_irqsave(&gpc->lock, flags);
- while (!kvm_gpc_check(gpc, gpc->gpa)) {
+ while (!kvm_gpc_check(gpc)) {
read_unlock_irqrestore(&gpc->lock, flags);
- if (kvm_gpc_refresh(gpc, gpc->gpa))
+ if (kvm_gpc_refresh(gpc))
return;
read_lock_irqsave(&gpc->lock, flags);
@@ -407,7 +407,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
read_lock_irqsave(&gpc->lock, flags);
- while (!kvm_gpc_check(gpc, gpc->gpa)) {
+ while (!kvm_gpc_check(gpc)) {
read_unlock_irqrestore(&gpc->lock, flags);
/*
@@ -421,7 +421,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
if (in_atomic() || !task_is_running(current))
return 1;
- if (kvm_gpc_refresh(gpc, gpc->gpa)) {
+ if (kvm_gpc_refresh(gpc)) {
/*
* If this failed, userspace has screwed up the
* vcpu_info mapping. No interrupts for you.
@@ -947,7 +947,7 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
read_lock_irqsave(&gpc->lock, flags);
idx = srcu_read_lock(&kvm->srcu);
- if (!kvm_gpc_check(gpc, gpc->gpa))
+ if (!kvm_gpc_check(gpc))
goto out_rcu;
ret = false;
@@ -1338,7 +1338,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
idx = srcu_read_lock(&kvm->srcu);
read_lock_irqsave(&gpc->lock, flags);
- if (!kvm_gpc_check(gpc, gpc->gpa))
+ if (!kvm_gpc_check(gpc))
goto out_rcu;
if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
@@ -1372,7 +1372,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
gpc = &vcpu->arch.xen.vcpu_info_cache;
read_lock_irqsave(&gpc->lock, flags);
- if (!kvm_gpc_check(gpc, gpc->gpa)) {
+ if (!kvm_gpc_check(gpc)) {
/*
* Could not access the vcpu_info. Set the bit in-kernel
* and prod the vCPU to deliver it for itself.
@@ -1470,7 +1470,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
break;
idx = srcu_read_lock(&kvm->srcu);
- rc = kvm_gpc_refresh(gpc, gpc->gpa);
+ rc = kvm_gpc_refresh(gpc);
srcu_read_unlock(&kvm->srcu, idx);
} while(!rc);
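For context, this hunk sits in a fast/slow retry loop: kvm_xen_set_evtchn_fast() fails when the cache needs refreshing, and the slow path refreshes under kvm->srcu (needed for the memslot lookup) without holding gpc->lock, which kvm_gpc_refresh() takes internally. A sketch reconstructed from the hunk; the -EWOULDBLOCK convention for "needs refresh" is an assumption based on the fast/slow split:

        do {
                rc = kvm_xen_set_evtchn_fast(xe, kvm);
                if (rc != -EWOULDBLOCK)
                        break;

                idx = srcu_read_lock(&kvm->srcu);
                rc = kvm_gpc_refresh(gpc);
                srcu_read_unlock(&kvm->srcu, idx);
        } while (!rc);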
@@ -1283,7 +1283,6 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa);
* kvm_gpc_check - check validity of a gfn_to_pfn_cache.
*
* @gpc: struct gfn_to_pfn_cache object.
- * @gpa: current guest physical address to map.
*
- * @return: %true if the cache is still valid and the address matches.
+ * @return: %true if the cache is still valid and safe to access.
* %false if the cache is not valid.
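The documentation below spells out two calling conventions; every hunk in this patch uses the locked one shown in the sketch further up. For completeness, a hedged sketch of the lockless IN_GUEST_MODE variant (the u32 access through gpc->khva is illustrative; the safety argument is that invalidation kicks vCPUs out of guest mode before the mapping goes away):

        idx = srcu_read_lock(&kvm->srcu);
        if (kvm_gpc_check(gpc))
                val = READ_ONCE(*(u32 *)gpc->khva);
        srcu_read_unlock(&kvm->srcu, idx);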
@@ -1295,13 +1294,12 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa);
* Callers in IN_GUEST_MODE may do so without locking, although they should
* still hold a read lock on kvm->srcu for the memslot checks.
*/
-bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, gpa_t gpa);
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc);
/**
* kvm_gpc_refresh - update a previously initialized cache.
*
* @gpc: struct gfn_to_pfn_cache object.
- * @gpa: updated guest physical address to map.
*
* @return: 0 for success.
* -EINVAL for a mapping which would cross a page boundary.
@@ -1313,7 +1311,7 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, gpa_t gpa);
* still lock and check the cache status, as this function does not return
* with the lock still held to permit access.
*/
-int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa);
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc);
/**
* kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
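With @gpa gone, a failed refresh can no longer be retried at a different address through this interface; callers simply propagate the error, as the converted sites do. A small sketch of the handling implied by the kvm_gpc_refresh() comment (treating -EFAULT as "untranslatable gpa" is an assumption, only -EINVAL is documented in the hunk above):

        ret = kvm_gpc_refresh(gpc);
        if (ret == -EINVAL)     /* gpc->gpa + gpc->len crosses a page */
                return ret;
        if (ret)                /* e.g. -EFAULT: gpa not mappable */
                return ret;

        /* Success is not permission: re-take gpc->lock for read and
         * kvm_gpc_check() again before touching the mapping. */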
@@ -76,17 +76,14 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
}
}
-bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, gpa_t gpa)
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc)
{
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
if (!gpc->active)
return false;
- if ((gpa & ~PAGE_MASK) + gpc->len > PAGE_SIZE)
- return false;
-
- if (gpc->gpa != gpa || gpc->generation != slots->generation ||
+ if (gpc->generation != slots->generation ||
kvm_is_error_hva(gpc->uhva))
return false;
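Worth noting what the dropped lines mean: gpc->gpa and gpc->len now only change while the cache is being (re)mapped, so re-validating the page-boundary condition on every check was redundant. The condition still has to hold and is enforced on the refresh path; a sketch of where it would live in __kvm_gpc_refresh() (the page_offset computation is visible in the next hunk, the exact placement of the -EINVAL check is an assumption):

        static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa)
        {
                unsigned long page_offset = gpa & ~PAGE_MASK;

                /* The cached span must not cross a page boundary. */
                if (page_offset + gpc->len > PAGE_SIZE)
                        return -EINVAL;
                ...
        }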
@@ -237,7 +234,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
return -EFAULT;
}
-int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa)
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa)
{
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
unsigned long page_offset = gpa & ~PAGE_MASK;
@@ -326,6 +323,11 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa)
return ret;
}
+
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc)
+{
+ return __kvm_gpc_refresh(gpc, gpc->gpa);
+}
EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
@@ -369,7 +371,7 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa)
gpc->active = true;
write_unlock_irq(&gpc->lock);
}
- return kvm_gpc_refresh(gpc, gpa);
+ return __kvm_gpc_refresh(gpc, gpa);
}
EXPORT_SYMBOL_GPL(kvm_gpc_activate);
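Putting the pieces together, a hypothetical lifetime of a cache after this patch (src and the surrounding error handling are illustrative, and kvm_gpc_deactivate() taking only @gpc is an assumption based on its mention in the header above):

        ret = kvm_gpc_activate(gpc, gpa);       /* initial map; stores gpa */
        if (ret)
                return ret;

        read_lock_irqsave(&gpc->lock, flags);
        if (kvm_gpc_check(gpc))
                memcpy(gpc->khva, src, gpc->len);
        read_unlock_irqrestore(&gpc->lock, flags);

        /* ... later, when the guest disables or moves the area ... */
        kvm_gpc_deactivate(gpc);

The activate path is the one caller that still needs the two-argument form, hence the __kvm_gpc_refresh() split: gpc->gpa only becomes meaningful once a refresh has stored it.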