vfio: fix deadlock between group lock and kvm lock

Message ID 20230131200635.44227-1-mjrosato@linux.ibm.com
State New
Series vfio: fix deadlock between group lock and kvm lock

Commit Message

Matthew Rosato Jan. 31, 2023, 8:06 p.m. UTC
  After 51cdc8bc120e, we have another deadlock scenario between the
kvm->lock and the vfio group_lock with two different codepaths acquiring
the locks in different order.  Specifically in vfio_open_device, vfio
holds the vfio group_lock when issuing device->ops->open_device but some
drivers (like vfio-ap) need to acquire kvm->lock during their open_device
routine;  Meanwhile, kvm_vfio_release will acquire the kvm->lock first
before calling vfio_file_set_kvm which will acquire the vfio group_lock.

To resolve this, let's remove the need for the vfio group_lock from the
kvm_vfio_release codepath.  This is done by introducing a new spinlock to
protect modifications to the vfio group kvm pointer, and acquiring a kvm
ref from within vfio while holding this spinlock, with the reference held
until the last close for the device in question.

Fixes: 51cdc8bc120e ("kvm/vfio: Fix potential deadlock on vfio group_lock")
Reported-by: Anthony Krowiak <akrowiak@linux.ibm.com>
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Matthew Rosato <mjrosato@linux.ibm.com>
---
 drivers/vfio/group.c     | 14 +++----
 drivers/vfio/vfio.h      |  5 ++-
 drivers/vfio/vfio_main.c | 88 ++++++++++++++++++++++++++++++++++++----
 include/linux/vfio.h     |  2 +-
 4 files changed, 88 insertions(+), 21 deletions(-)
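
To make the lock ordering inversion above concrete, here is a minimal standalone userspace sketch of the same AB-BA pattern, with pthread mutexes standing in for kvm->lock and the group_lock. The function names are illustrative only (this is not kernel code), and a timed lock is used in one path so the program reports the inversion instead of hanging:

/* abba.c - build: gcc -pthread -o abba abba.c */
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for vfio_device_group_open(): take the group lock, then the
 * driver's open_device callback needs the kvm lock (as vfio-ap does). */
static void *open_device_path(void *unused)
{
	pthread_mutex_lock(&group_lock);
	usleep(100 * 1000);		/* widen the race window */
	pthread_mutex_lock(&kvm_lock);	/* blocks while the other path holds it */
	pthread_mutex_unlock(&kvm_lock);
	pthread_mutex_unlock(&group_lock);
	return NULL;
}

/* Stand-in for kvm_vfio_release(): take the kvm lock, then
 * vfio_file_set_kvm() needs the group lock.  A 2s timed lock stands in
 * for the hang that lockdep would report in the kernel. */
static void *kvm_release_path(void *unused)
{
	struct timespec timeout;

	pthread_mutex_lock(&kvm_lock);
	usleep(100 * 1000);
	clock_gettime(CLOCK_REALTIME, &timeout);
	timeout.tv_sec += 2;
	if (pthread_mutex_timedlock(&group_lock, &timeout))
		printf("AB-BA lock ordering: this would deadlock in the kernel\n");
	else
		pthread_mutex_unlock(&group_lock);
	pthread_mutex_unlock(&kvm_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, open_device_path, NULL);
	pthread_create(&b, NULL, kvm_release_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}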
  

Comments

Jason Gunthorpe Jan. 31, 2023, 8:25 p.m. UTC | #1
On Tue, Jan 31, 2023 at 03:06:35PM -0500, Matthew Rosato wrote:
> @@ -799,13 +794,14 @@ EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
>  void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
>  {
>  	struct vfio_group *group = file->private_data;
> +	unsigned long flags;
>  
>  	if (!vfio_file_is_group(file))
>  		return;
>  
> -	mutex_lock(&group->group_lock);
> +	spin_lock_irqsave(&group->kvm_ref_lock, flags);
>  	group->kvm = kvm;
> -	mutex_unlock(&group->group_lock);
> +	spin_unlock_irqrestore(&group->kvm_ref_lock, flags);

We know we are in a sleeping context here so these are just
'spin_lock()', same with the other one

Otherwise it seems Ok to me

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>

Jason
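
For reference, with that suggestion applied the setter would read roughly as follows (a sketch against the patch as posted, not the final code):

void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
	struct vfio_group *group = file->private_data;

	if (!vfio_file_is_group(file))
		return;

	/*
	 * Only reached from sleeping (process) context and the lock is
	 * never taken from irq context, so plain spin_lock() suffices.
	 */
	spin_lock(&group->kvm_ref_lock);
	group->kvm = kvm;
	spin_unlock(&group->kvm_ref_lock);
}
EXPORT_SYMBOL_GPL(vfio_file_set_kvm);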
  
Tian, Kevin Feb. 1, 2023, 6:13 a.m. UTC | #2
> From: Matthew Rosato <mjrosato@linux.ibm.com>
> Sent: Wednesday, February 1, 2023 4:07 AM
> 
> -	device->kvm = kvm;
> +	/*
> +	 * Get the KVM pointer currently associated with the group, if there
> +	 * is one, and obtain a reference now that will be held until the
> +	 * open_count reaches 0.  Save the pointer in the device for use by
> +	 * drivers.
> +	 */
> +	spin_lock_irqsave(&group->kvm_ref_lock, flags);
> +	if (group->kvm && vfio_kvm_get_kvm_safe(device, group->kvm))
> +		device->kvm = group->kvm;
> +	spin_unlock_irqrestore(&group->kvm_ref_lock, flags);
> +

No group reference in vfio_main.c. 

btw Yi, it looks like the deadlock issue also exists in your cdev work.

kvm_vfio_release() holds kvm lock and then try to acquire
device->device_set->lock in vfio_device_file_set_kvm().

vfio_device_ioctl_bind_iommufd() holds device->device_set->lock
and then call vfio_device_open() which finally hit kvm lock
acquisition in driver's open_device routine (e.g. vfio-ap).

A similar fix is required in your series.

Thanks
Kevin
  
Yi Liu Feb. 1, 2023, 12:42 p.m. UTC | #3
> From: Matthew Rosato <mjrosato@linux.ibm.com>
> Sent: Wednesday, February 1, 2023 4:07 AM
> 
> After 51cdc8bc120e, we have another deadlock scenario between the
> kvm->lock and the vfio group_lock with two different codepaths acquiring
> the locks in different order.  Specifically in vfio_open_device, vfio
> holds the vfio group_lock when issuing device->ops->open_device but some
> drivers (like vfio-ap) need to acquire kvm->lock during their open_device
> routine;  Meanwhile, kvm_vfio_release will acquire the kvm->lock first
> before calling vfio_file_set_kvm which will acquire the vfio group_lock.
> 
> To resolve this, let's remove the need for the vfio group_lock from the
> kvm_vfio_release codepath.  This is done by introducing a new spinlock to
> protect modifications to the vfio group kvm pointer, and acquiring a kvm
> ref from within vfio while holding this spinlock, with the reference held
> until the last close for the device in question.
> 
> Fixes: 51cdc8bc120e ("kvm/vfio: Fix potential deadlock on vfio group_lock")
> Reported-by: Anthony Krowiak <akrowiak@linux.ibm.com>
> Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
> Signed-off-by: Matthew Rosato <mjrosato@linux.ibm.com>
> ---
>  drivers/vfio/group.c     | 14 +++----
>  drivers/vfio/vfio.h      |  5 ++-
>  drivers/vfio/vfio_main.c | 88 ++++++++++++++++++++++++++++++++++----
>  include/linux/vfio.h     |  2 +-
>  4 files changed, 88 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
> index bb24b2f0271e..8f67d33e0e8d 100644
> --- a/drivers/vfio/group.c
> +++ b/drivers/vfio/group.c
> @@ -164,13 +164,7 @@ static int vfio_device_group_open(struct vfio_device *device)
>  		goto out_unlock;
>  	}
> 
> -	/*
> -	 * Here we pass the KVM pointer with the group under the lock.  If the
> -	 * device driver will use it, it must obtain a reference and release it
> -	 * during close_device.
> -	 */
> -	ret = vfio_device_open(device, device->group->iommufd,
> -			       device->group->kvm);
> +	ret = vfio_device_open(device, device->group);
> 
>  out_unlock:
>  	mutex_unlock(&device->group->group_lock);
> @@ -450,6 +444,7 @@ static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
> 
>  	refcount_set(&group->drivers, 1);
>  	mutex_init(&group->group_lock);
> +	spin_lock_init(&group->kvm_ref_lock);
>  	INIT_LIST_HEAD(&group->device_list);
>  	mutex_init(&group->device_lock);
>  	group->iommu_group = iommu_group;
> @@ -799,13 +794,14 @@ EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
>  void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
>  {
>  	struct vfio_group *group = file->private_data;
> +	unsigned long flags;
> 
>  	if (!vfio_file_is_group(file))
>  		return;
> 
> -	mutex_lock(&group->group_lock);
> +	spin_lock_irqsave(&group->kvm_ref_lock, flags);
>  	group->kvm = kvm;
> -	mutex_unlock(&group->group_lock);
> +	spin_unlock_irqrestore(&group->kvm_ref_lock, flags);
>  }
>  EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
> 
> diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
> index f8219a438bfb..d153ad35de24 100644
> --- a/drivers/vfio/vfio.h
> +++ b/drivers/vfio/vfio.h
> @@ -15,11 +15,11 @@ struct iommufd_ctx;
>  struct iommu_group;
>  struct vfio_device;
>  struct vfio_container;
> +struct vfio_group;
> 
>  void vfio_device_put_registration(struct vfio_device *device);
>  bool vfio_device_try_get_registration(struct vfio_device *device);
> -int vfio_device_open(struct vfio_device *device,
> -		     struct iommufd_ctx *iommufd, struct kvm *kvm);
> +int vfio_device_open(struct vfio_device *device, struct vfio_group *group);
>  void vfio_device_close(struct vfio_device *device,
>  		       struct iommufd_ctx *iommufd);
> 
> @@ -74,6 +74,7 @@ struct vfio_group {
>  	struct file			*opened_file;
>  	struct blocking_notifier_head	notifier;
>  	struct iommufd_ctx		*iommufd;
> +	spinlock_t			kvm_ref_lock;
>  };
> 
>  int vfio_device_set_group(struct vfio_device *device,
> diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
> index 5177bb061b17..b0defb0d0d87 100644
> --- a/drivers/vfio/vfio_main.c
> +++ b/drivers/vfio/vfio_main.c
> @@ -16,6 +16,9 @@
>  #include <linux/fs.h>
>  #include <linux/idr.h>
>  #include <linux/iommu.h>
> +#ifdef CONFIG_HAVE_KVM
> +#include <linux/kvm_host.h>
> +#endif
>  #include <linux/list.h>
>  #include <linux/miscdevice.h>
>  #include <linux/module.h>
> @@ -344,9 +347,59 @@ static bool vfio_assert_device_open(struct vfio_device *device)
>  	return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
>  }
> 
> +#ifdef CONFIG_HAVE_KVM
> +static bool vfio_kvm_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
> +{
> +	void (*pfn)(struct kvm *kvm);
> +	bool (*fn)(struct kvm *kvm);
> +	bool ret;
> +
> +	pfn = symbol_get(kvm_put_kvm);
> +	if (WARN_ON(!pfn))
> +		return false;
> +
> +	fn = symbol_get(kvm_get_kvm_safe);
> +	if (WARN_ON(!fn)) {
> +		symbol_put(kvm_put_kvm);
> +		return false;
> +	}
> +
> +	ret = fn(kvm);
> +	if (ret)
> +		device->put_kvm = pfn;
> +	else
> +		symbol_put(kvm_put_kvm);
> +
> +	symbol_put(kvm_get_kvm_safe);
> +
> +	return ret;
> +}
> +
> +static void vfio_kvm_put_kvm(struct vfio_device *device)
> +{
> +	if (WARN_ON(!device->kvm || !device->put_kvm))
> +		return;
> +
> +	device->put_kvm(device->kvm);
> +	symbol_put(kvm_put_kvm);
> +}
> +
> +#else
> +static bool vfio_kvm_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
> +{
> +	return false;
> +}
> +
> +static void vfio_kvm_put_kvm(struct vfio_device *device)
> +{
> +}
> +#endif
> +
>  static int vfio_device_first_open(struct vfio_device *device,
> -				  struct iommufd_ctx *iommufd, struct kvm *kvm)
> +				  struct vfio_group *group)
>  {
> +	struct iommufd_ctx *iommufd = group->iommufd;
> +	unsigned long flags;
>  	int ret;
> 
>  	lockdep_assert_held(&device->dev_set->lock);
> @@ -361,16 +414,30 @@ static int vfio_device_first_open(struct vfio_device *device,
>  	if (ret)
>  		goto err_module_put;
> 
> -	device->kvm = kvm;
> +	/*
> +	 * Get the KVM pointer currently associated with the group, if there
> +	 * is one, and obtain a reference now that will be held until the
> +	 * open_count reaches 0.  Save the pointer in the device for use by
> +	 * drivers.
> +	 */
> +	spin_lock_irqsave(&group->kvm_ref_lock, flags);
> +	if (group->kvm && vfio_kvm_get_kvm_safe(device, group->kvm))
> +		device->kvm = group->kvm;
> +	spin_unlock_irqrestore(&group->kvm_ref_lock, flags);
> +
>  	if (device->ops->open_device) {
>  		ret = device->ops->open_device(device);
>  		if (ret)
> -			goto err_unuse_iommu;
> +			goto err_put_kvm;
>  	}
>  	return 0;
> 
> -err_unuse_iommu:
> -	device->kvm = NULL;
> +err_put_kvm:
> +	if (device->kvm) {
> +		vfio_kvm_put_kvm(device);
> +		device->put_kvm = NULL;

Can "device->put_kvm = NULL;" be part of vfio_kvm_put_kvm()?

Regards,
Yi Liu
  
Yi Liu Feb. 1, 2023, 12:43 p.m. UTC | #4
> From: Jason Gunthorpe <jgg@nvidia.com>
> Sent: Wednesday, February 1, 2023 4:26 AM
> 
> On Tue, Jan 31, 2023 at 03:06:35PM -0500, Matthew Rosato wrote:
> > @@ -799,13 +794,14 @@ EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
> >  void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
> >  {
> >  	struct vfio_group *group = file->private_data;
> > +	unsigned long flags;
> >
> >  	if (!vfio_file_is_group(file))
> >  		return;
> >
> > -	mutex_lock(&group->group_lock);
> > +	spin_lock_irqsave(&group->kvm_ref_lock, flags);
> >  	group->kvm = kvm;
> > -	mutex_unlock(&group->group_lock);
> > +	spin_unlock_irqrestore(&group->kvm_ref_lock, flags);
> 
> We know we are in a sleeping context here so these are just
> 'spin_lock()', same with the other one

A dumb question: why is a spinlock required here? 😊

Regards,
Yi Liu
  
Matthew Rosato Feb. 1, 2023, 2:27 p.m. UTC | #5
On 2/1/23 1:13 AM, Tian, Kevin wrote:
>> From: Matthew Rosato <mjrosato@linux.ibm.com>
>> Sent: Wednesday, February 1, 2023 4:07 AM
>>
>> -	device->kvm = kvm;
>> +	/*
>> +	 * Get the KVM pointer currently associated with the group, if there
>> +	 * is one, and obtain a reference now that will be held until the
>> +	 * open_count reaches 0.  Save the pointer in the device for use by
>> +	 * drivers.
>> +	 */
>> +	spin_lock_irqsave(&group->kvm_ref_lock, flags);
>> +	if (group->kvm && vfio_kvm_get_kvm_safe(device, group->kvm))
>> +		device->kvm = group->kvm;
>> +	spin_unlock_irqrestore(&group->kvm_ref_lock, flags);
>> +
> 
> No group reference in vfio_main.c. 

OK -- I think to do that I'll have to move the lock/unlock of the dev_set lock into group.c, right around the calls to vfio_device_{open,close}, and check open_count there to make decisions about the kvm ref: before calling vfio_device_open to decide whether to get the kvm ref, after returning from vfio_device_open to see if we need to drop the ref on error, and after close to see if we need to drop the ref.

> 
> btw Yi, looks the deadlock issue also exists in your cdev work.
> 
> kvm_vfio_release() holds kvm lock and then try to acquire
> device->device_set->lock in vfio_device_file_set_kvm().
> 
> vfio_device_ioctl_bind_iommufd() holds device->device_set->lock
> and then call vfio_device_open() which finally hit kvm lock
> acquisition in driver's open_device routine (e.g. vfio-ap).
> 
> A similar fix is required in your series.
> 
> Thanks
> Kevin
  
Matthew Rosato Feb. 1, 2023, 2:41 p.m. UTC | #6
On 2/1/23 7:42 AM, Liu, Yi L wrote:
>> From: Matthew Rosato <mjrosato@linux.ibm.com>
...
>> +	if (device->kvm) {
>> +		vfio_kvm_put_kvm(device);
>> +		device->put_kvm = NULL;
> 
> Can "device->put_kvm = NULL;" be part of vfio_kvm_put_kvm()?

Sure
  
Matthew Rosato Feb. 1, 2023, 2:42 p.m. UTC | #7
On 2/1/23 7:43 AM, Liu, Yi L wrote:
>> From: Jason Gunthorpe <jgg@nvidia.com>
>> Sent: Wednesday, February 1, 2023 4:26 AM
>>
>> On Tue, Jan 31, 2023 at 03:06:35PM -0500, Matthew Rosato wrote:
>>> @@ -799,13 +794,14 @@ EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
>>>  void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
>>>  {
>>>  	struct vfio_group *group = file->private_data;
>>> +	unsigned long flags;
>>>
>>>  	if (!vfio_file_is_group(file))
>>>  		return;
>>>
>>> -	mutex_lock(&group->group_lock);
>>> +	spin_lock_irqsave(&group->kvm_ref_lock, flags);
>>>  	group->kvm = kvm;
>>> -	mutex_unlock(&group->group_lock);
>>> +	spin_unlock_irqrestore(&group->kvm_ref_lock, flags);
>>
>> We know we are in a sleeping context here so these are just
>> 'spin_lock()', same with the other one
> 
> a dumb question. Why spinlock is required here? 😊
> 

You mean as opposed to another mutex?  I don't think it's required per se (we are replacing a mutex, so we could have used another mutex here), but all current users of this new lock hold it over a very short window (e.g. setting a pointer as above, or refcount++ and copying the pointer as in the first device_open).
  
Yi Liu Feb. 2, 2023, 3:08 a.m. UTC | #8
> From: Matthew Rosato <mjrosato@linux.ibm.com>
> Sent: Wednesday, February 1, 2023 10:43 PM
> 
> On 2/1/23 7:43 AM, Liu, Yi L wrote:
> >> From: Jason Gunthorpe <jgg@nvidia.com>
> >> Sent: Wednesday, February 1, 2023 4:26 AM
> >>
> >> On Tue, Jan 31, 2023 at 03:06:35PM -0500, Matthew Rosato wrote:
> >>> @@ -799,13 +794,14 @@ EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
> >>>  void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
> >>>  {
> >>>  	struct vfio_group *group = file->private_data;
> >>> +	unsigned long flags;
> >>>
> >>>  	if (!vfio_file_is_group(file))
> >>>  		return;
> >>>
> >>> -	mutex_lock(&group->group_lock);
> >>> +	spin_lock_irqsave(&group->kvm_ref_lock, flags);
> >>>  	group->kvm = kvm;
> >>> -	mutex_unlock(&group->group_lock);
> >>> +	spin_unlock_irqrestore(&group->kvm_ref_lock, flags);
> >>
> >> We know we are in a sleeping context here so these are just
> >> 'spin_lock()', same with the other one
> >
> > a dumb question. Why spinlock is required here? 😊
> >
> 
> You mean as opposed to another mutex?  I don't think it's required per se
> (we are replacing a mutex so we could have again used another mutex
> here), but all current users of this new lock hold it over a very short window
> (e.g. set a pointer as above, or refcount++ and copy the pointer as in the
> first device_open)

I see. I was just not sure whether a spinlock was required for a special reason.

Regards,
Yi Liu
  

Patch

diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index bb24b2f0271e..8f67d33e0e8d 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -164,13 +164,7 @@  static int vfio_device_group_open(struct vfio_device *device)
 		goto out_unlock;
 	}
 
-	/*
-	 * Here we pass the KVM pointer with the group under the lock.  If the
-	 * device driver will use it, it must obtain a reference and release it
-	 * during close_device.
-	 */
-	ret = vfio_device_open(device, device->group->iommufd,
-			       device->group->kvm);
+	ret = vfio_device_open(device, device->group);
 
 out_unlock:
 	mutex_unlock(&device->group->group_lock);
@@ -450,6 +444,7 @@  static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
 
 	refcount_set(&group->drivers, 1);
 	mutex_init(&group->group_lock);
+	spin_lock_init(&group->kvm_ref_lock);
 	INIT_LIST_HEAD(&group->device_list);
 	mutex_init(&group->device_lock);
 	group->iommu_group = iommu_group;
@@ -799,13 +794,14 @@  EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
 void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
 {
 	struct vfio_group *group = file->private_data;
+	unsigned long flags;
 
 	if (!vfio_file_is_group(file))
 		return;
 
-	mutex_lock(&group->group_lock);
+	spin_lock_irqsave(&group->kvm_ref_lock, flags);
 	group->kvm = kvm;
-	mutex_unlock(&group->group_lock);
+	spin_unlock_irqrestore(&group->kvm_ref_lock, flags);
 }
 EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
 
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index f8219a438bfb..d153ad35de24 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -15,11 +15,11 @@  struct iommufd_ctx;
 struct iommu_group;
 struct vfio_device;
 struct vfio_container;
+struct vfio_group;
 
 void vfio_device_put_registration(struct vfio_device *device);
 bool vfio_device_try_get_registration(struct vfio_device *device);
-int vfio_device_open(struct vfio_device *device,
-		     struct iommufd_ctx *iommufd, struct kvm *kvm);
+int vfio_device_open(struct vfio_device *device, struct vfio_group *group);
 void vfio_device_close(struct vfio_device *device,
 		       struct iommufd_ctx *iommufd);
 
@@ -74,6 +74,7 @@  struct vfio_group {
 	struct file			*opened_file;
 	struct blocking_notifier_head	notifier;
 	struct iommufd_ctx		*iommufd;
+	spinlock_t			kvm_ref_lock;
 };
 
 int vfio_device_set_group(struct vfio_device *device,
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index 5177bb061b17..b0defb0d0d87 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -16,6 +16,9 @@ 
 #include <linux/fs.h>
 #include <linux/idr.h>
 #include <linux/iommu.h>
+#ifdef CONFIG_HAVE_KVM
+#include <linux/kvm_host.h>
+#endif
 #include <linux/list.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
@@ -344,9 +347,59 @@  static bool vfio_assert_device_open(struct vfio_device *device)
 	return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
 }
 
+#ifdef CONFIG_HAVE_KVM
+static bool vfio_kvm_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
+{
+	void (*pfn)(struct kvm *kvm);
+	bool (*fn)(struct kvm *kvm);
+	bool ret;
+
+	pfn = symbol_get(kvm_put_kvm);
+	if (WARN_ON(!pfn))
+		return false;
+
+	fn = symbol_get(kvm_get_kvm_safe);
+	if (WARN_ON(!fn)) {
+		symbol_put(kvm_put_kvm);
+		return false;
+	}
+
+	ret = fn(kvm);
+	if (ret)
+		device->put_kvm = pfn;
+	else
+		symbol_put(kvm_put_kvm);
+
+	symbol_put(kvm_get_kvm_safe);
+
+	return ret;
+}
+
+static void vfio_kvm_put_kvm(struct vfio_device *device)
+{
+	if (WARN_ON(!device->kvm || !device->put_kvm))
+		return;
+
+	device->put_kvm(device->kvm);
+	symbol_put(kvm_put_kvm);
+}
+
+#else
+static bool vfio_kvm_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
+{
+	return false;
+}
+
+static void vfio_kvm_put_kvm(struct vfio_device *device)
+{
+}
+#endif
+
 static int vfio_device_first_open(struct vfio_device *device,
-				  struct iommufd_ctx *iommufd, struct kvm *kvm)
+				  struct vfio_group *group)
 {
+	struct iommufd_ctx *iommufd = group->iommufd;
+	unsigned long flags;
 	int ret;
 
 	lockdep_assert_held(&device->dev_set->lock);
@@ -361,16 +414,30 @@  static int vfio_device_first_open(struct vfio_device *device,
 	if (ret)
 		goto err_module_put;
 
-	device->kvm = kvm;
+	/*
+	 * Get the KVM pointer currently associated with the group, if there
+	 * is one, and obtain a reference now that will be held until the
+	 * open_count reaches 0.  Save the pointer in the device for use by
+	 * drivers.
+	 */
+	spin_lock_irqsave(&group->kvm_ref_lock, flags);
+	if (group->kvm && vfio_kvm_get_kvm_safe(device, group->kvm))
+		device->kvm = group->kvm;
+	spin_unlock_irqrestore(&group->kvm_ref_lock, flags);
+
 	if (device->ops->open_device) {
 		ret = device->ops->open_device(device);
 		if (ret)
-			goto err_unuse_iommu;
+			goto err_put_kvm;
 	}
 	return 0;
 
-err_unuse_iommu:
-	device->kvm = NULL;
+err_put_kvm:
+	if (device->kvm) {
+		vfio_kvm_put_kvm(device);
+		device->put_kvm = NULL;
+		device->kvm = NULL;
+	}
 	if (iommufd)
 		vfio_iommufd_unbind(device);
 	else
@@ -387,7 +454,11 @@  static void vfio_device_last_close(struct vfio_device *device,
 
 	if (device->ops->close_device)
 		device->ops->close_device(device);
-	device->kvm = NULL;
+	if (device->kvm) {
+		vfio_kvm_put_kvm(device);
+		device->kvm = NULL;
+		device->put_kvm = NULL;
+	}
 	if (iommufd)
 		vfio_iommufd_unbind(device);
 	else
@@ -395,15 +466,14 @@  static void vfio_device_last_close(struct vfio_device *device,
 	module_put(device->dev->driver->owner);
 }
 
-int vfio_device_open(struct vfio_device *device,
-		     struct iommufd_ctx *iommufd, struct kvm *kvm)
+int vfio_device_open(struct vfio_device *device, struct vfio_group *group)
 {
 	int ret = 0;
 
 	mutex_lock(&device->dev_set->lock);
 	device->open_count++;
 	if (device->open_count == 1) {
-		ret = vfio_device_first_open(device, iommufd, kvm);
+		ret = vfio_device_first_open(device, group);
 		if (ret)
 			device->open_count--;
 	}
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 35be78e9ae57..87ff862ff555 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -46,7 +46,6 @@  struct vfio_device {
 	struct vfio_device_set *dev_set;
 	struct list_head dev_set_list;
 	unsigned int migration_flags;
-	/* Driver must reference the kvm during open_device or never touch it */
 	struct kvm *kvm;
 
 	/* Members below here are private, not for driver use */
@@ -58,6 +57,7 @@  struct vfio_device {
 	struct list_head group_next;
 	struct list_head iommu_entry;
 	struct iommufd_access *iommufd_access;
+	void (*put_kvm)(struct kvm *kvm);
 #if IS_ENABLED(CONFIG_IOMMUFD)
 	struct iommufd_device *iommufd_device;
 	struct iommufd_ctx *iommufd_ictx;
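
As a usage note for driver authors: with this change the vfio core grabs the kvm reference before calling ->open_device() and drops it only after the last close, so a driver can simply consume device->kvm in its open_device callback without taking its own reference. A hypothetical sketch (the my_drv_* names are made up, not from any in-tree driver):

static int my_drv_open_device(struct vfio_device *vdev)
{
	/*
	 * If a KVM was associated with the group before the first open,
	 * the core already holds a reference and vdev->kvm stays valid
	 * until open_count drops back to zero.
	 */
	if (!vdev->kvm)
		return -ENODEV;		/* this driver requires KVM */

	/* Hypothetical helper that wires the device up to the guest. */
	return my_drv_attach_to_kvm(vdev, vdev->kvm);
}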