vhost_vdpa: fix the crash in unmap a large memory

Message ID 20221124062309.2081720-1-lulu@redhat.com
State New
Headers
Series vhost_vdpa: fix the crash in unmap a large memory |

Commit Message

Cindy Lu Nov. 24, 2022, 6:23 a.m. UTC
  While testing in vIOMMU, sometimes the guest will unmap a very large
memory region, which will cause a crash. To fix this, move the
iommu_unmap to vhost_vdpa_pa_unmap/vhost_vdpa_va_unmap and only unmap
the memory that is saved in the iotlb.

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 drivers/vhost/vdpa.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)
  

Comments

Jason Wang Nov. 24, 2022, 6:34 a.m. UTC | #1
On Thu, Nov 24, 2022 at 2:23 PM Cindy Lu <lulu@redhat.com> wrote:
>
> While testing in vIOMMU, sometimes guest will unmap very large memory,
> which will cause the crash.

Would you mind posting the call trace?

> To fix this,Move the iommu_unmap to
> vhost_vdpa_pa_unmap/vhost_vdpa_va_unmap and only unmap the memory
> that saved in iotlb.
>
> Signed-off-by: Cindy Lu <lulu@redhat.com>

Let's add a fixes tag which I believe should be the first commit that
introduces vhost-vDPA. And let's cc stable as well.

> ---
>  drivers/vhost/vdpa.c | 10 ++++++++--
>  1 file changed, 8 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index 166044642fd5..c392979702cf 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -692,6 +692,8 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
>         struct vhost_iotlb_map *map;
>         struct page *page;
>         unsigned long pfn, pinned;
> +       struct vdpa_device *vdpa = v->vdpa;
> +       const struct vdpa_config_ops *ops = vdpa->config;
>
>         while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
>                 pinned = PFN_DOWN(map->size);
> @@ -703,6 +705,8 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
>                         unpin_user_page(page);
>                 }
>                 atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
> +               if ((ops->dma_map == NULL) && (ops->set_map == NULL))
> +                       iommu_unmap(v->domain, map->start, map->size);
>                 vhost_iotlb_map_free(iotlb, map);
>         }
>  }
> @@ -713,11 +717,15 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
>  {
>         struct vhost_iotlb_map *map;
>         struct vdpa_map_file *map_file;
> +       struct vdpa_device *vdpa = v->vdpa;
> +       const struct vdpa_config_ops *ops = vdpa->config;
>
>         while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
>                 map_file = (struct vdpa_map_file *)map->opaque;
>                 fput(map_file->file);
>                 kfree(map_file);
> +               if ((ops->dma_map == NULL) && (ops->set_map == NULL))
> +                       iommu_unmap(v->domain, map->start, map->size);

I wonder if it's better to move at least dma_map() here.

Thanks

>                 vhost_iotlb_map_free(iotlb, map);
>         }
>  }
> @@ -805,8 +813,6 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
>         } else if (ops->set_map) {
>                 if (!v->in_batch)
>                         ops->set_map(vdpa, asid, iotlb);
> -       } else {
> -               iommu_unmap(v->domain, iova, size);
>         }
>
>         /* If we are in the middle of batch processing, delay the free
> --
> 2.34.3
>
  
Cindy Lu Nov. 24, 2022, 6:54 a.m. UTC | #2
On Thu, 24 Nov 2022 at 14:34, Jason Wang <jasowang@redhat.com> wrote:
>
> On Thu, Nov 24, 2022 at 2:23 PM Cindy Lu <lulu@redhat.com> wrote:
> >
> > While testing in vIOMMU, sometimes guest will unmap very large memory,
> > which will cause the crash.
>
> Would you mind to post the calltrace?
>
Sure, will add this. Thanks, Jason.

> > To fix this,Move the iommu_unmap to
> > vhost_vdpa_pa_unmap/vhost_vdpa_va_unmap and only unmap the memory
> > that saved in iotlb.
> >
> > Signed-off-by: Cindy Lu <lulu@redhat.com>
>
> Let's add a fixes tag which I believe should be the first commit that
> introduces vhost-vDPA. And let's cc stable as well.
>
sure, will do.
> > ---
> >  drivers/vhost/vdpa.c | 10 ++++++++--
> >  1 file changed, 8 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> > index 166044642fd5..c392979702cf 100644
> > --- a/drivers/vhost/vdpa.c
> > +++ b/drivers/vhost/vdpa.c
> > @@ -692,6 +692,8 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
> >         struct vhost_iotlb_map *map;
> >         struct page *page;
> >         unsigned long pfn, pinned;
> > +       struct vdpa_device *vdpa = v->vdpa;
> > +       const struct vdpa_config_ops *ops = vdpa->config;
> >
> >         while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
> >                 pinned = PFN_DOWN(map->size);
> > @@ -703,6 +705,8 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
> >                         unpin_user_page(page);
> >                 }
> >                 atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
> > +               if ((ops->dma_map == NULL) && (ops->set_map == NULL))
> > +                       iommu_unmap(v->domain, map->start, map->size);
> >                 vhost_iotlb_map_free(iotlb, map);
> >         }
> >  }
> > @@ -713,11 +717,15 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
> >  {
> >         struct vhost_iotlb_map *map;
> >         struct vdpa_map_file *map_file;
> > +       struct vdpa_device *vdpa = v->vdpa;
> > +       const struct vdpa_config_ops *ops = vdpa->config;
> >
> >         while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
> >                 map_file = (struct vdpa_map_file *)map->opaque;
> >                 fput(map_file->file);
> >                 kfree(map_file);
> > +               if ((ops->dma_map == NULL) && (ops->set_map == NULL))
> > +                       iommu_unmap(v->domain, map->start, map->size);
>
> I wonder if it's better to move at least dma_map() here.
>
> Thanks
>
Sure, will remove this, thanks. Will provide a new version soon.
> >                 vhost_iotlb_map_free(iotlb, map);
> >         }
> >  }
> > @@ -805,8 +813,6 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
> >         } else if (ops->set_map) {
> >                 if (!v->in_batch)
> >                         ops->set_map(vdpa, asid, iotlb);
> > -       } else {
> > -               iommu_unmap(v->domain, iova, size);
> >         }
> >
> >         /* If we are in the middle of batch processing, delay the free
> > --
> > 2.34.3
> >
>
  

Patch

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 166044642fd5..c392979702cf 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -692,6 +692,8 @@  static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
 	struct vhost_iotlb_map *map;
 	struct page *page;
 	unsigned long pfn, pinned;
+	struct vdpa_device *vdpa = v->vdpa;
+	const struct vdpa_config_ops *ops = vdpa->config;
 
 	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
 		pinned = PFN_DOWN(map->size);
@@ -703,6 +705,8 @@  static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
 			unpin_user_page(page);
 		}
 		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
+		if ((ops->dma_map == NULL) && (ops->set_map == NULL))
+			iommu_unmap(v->domain, map->start, map->size);
 		vhost_iotlb_map_free(iotlb, map);
 	}
 }
@@ -713,11 +717,15 @@  static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
 {
 	struct vhost_iotlb_map *map;
 	struct vdpa_map_file *map_file;
+	struct vdpa_device *vdpa = v->vdpa;
+	const struct vdpa_config_ops *ops = vdpa->config;
 
 	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
 		map_file = (struct vdpa_map_file *)map->opaque;
 		fput(map_file->file);
 		kfree(map_file);
+		if ((ops->dma_map == NULL) && (ops->set_map == NULL))
+			iommu_unmap(v->domain, map->start, map->size);
 		vhost_iotlb_map_free(iotlb, map);
 	}
 }
@@ -805,8 +813,6 @@  static void vhost_vdpa_unmap(struct vhost_vdpa *v,
 	} else if (ops->set_map) {
 		if (!v->in_batch)
 			ops->set_map(vdpa, asid, iotlb);
-	} else {
-		iommu_unmap(v->domain, iova, size);
 	}
 
 	/* If we are in the middle of batch processing, delay the free