[v2,03/11] vdpa: Add set_irq_affinity callback in vdpa_config_ops

Message ID 20221205084127.535-4-xieyongji@bytedance.com
State New
Series: VDUSE: Improve performance

Commit Message

Yongji Xie Dec. 5, 2022, 8:41 a.m. UTC
  This introduces a set_irq_affinity callback in
vdpa_config_ops so that the vdpa device driver can
get the interrupt affinity hint from the virtio
device driver. The interrupt affinity hint is
needed by the interrupt affinity spreading
mechanism.

Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
---
 drivers/virtio/virtio_vdpa.c | 4 ++++
 include/linux/vdpa.h         | 8 ++++++++
 2 files changed, 12 insertions(+)
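
For context, the hint here is the same struct irq_affinity that virtio
device drivers already pass down through find_vqs(). A minimal sketch of
what such a hint can express (the field value below is illustrative, not
taken from this series):

#include <linux/interrupt.h>

/*
 * Illustrative hint: a driver whose first vector is a config/control
 * interrupt can exclude it from managed spreading, so that only the
 * I/O queue vectors are spread across CPUs.
 */
static const struct irq_affinity example_hint = {
	.pre_vectors = 1,
};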
  

Comments

Jason Wang Dec. 16, 2022, 3:58 a.m. UTC | #1
On Mon, Dec 5, 2022 at 4:43 PM Xie Yongji <xieyongji@bytedance.com> wrote:
>
> This introduces set_irq_affinity callback in
> vdpa_config_ops so that vdpa device driver can
> get the interrupt affinity hint from the virtio
> device driver. The interrupt affinity hint would
> be needed by the interrupt affinity spreading
> mechanism.
>
> Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
> ---
>  drivers/virtio/virtio_vdpa.c | 4 ++++
>  include/linux/vdpa.h         | 8 ++++++++
>  2 files changed, 12 insertions(+)
>
> diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
> index 08084b49e5a1..4731e4616ee0 100644
> --- a/drivers/virtio/virtio_vdpa.c
> +++ b/drivers/virtio/virtio_vdpa.c
> @@ -275,9 +275,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
>         struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
>         struct vdpa_device *vdpa = vd_get_vdpa(vdev);
>         const struct vdpa_config_ops *ops = vdpa->config;
> +       struct irq_affinity default_affd = { 0 };
>         struct vdpa_callback cb;
>         int i, err, queue_idx = 0;
>
> +       if (ops->set_irq_affinity)
> +               ops->set_irq_affinity(vdpa, desc ? desc : &default_affd);

I wonder if we need to do this in vhost-vDPA. Or would it be better to
have a default affinity provided by the vDPA parent itself?

(Looking at virtio-pci, it doesn't do something like this).

Thanks

> +
>         for (i = 0; i < nvqs; ++i) {
>                 if (!names[i]) {
>                         vqs[i] = NULL;
> diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
> index 0ff6c9363356..482ff7d0206f 100644
> --- a/include/linux/vdpa.h
> +++ b/include/linux/vdpa.h
> @@ -256,6 +256,12 @@ struct vdpa_map_file {
>   *                             @vdev: vdpa device
>   *                             @idx: virtqueue index
>   *                             Returns the irq affinity mask
> + * @set_irq_affinity:          Pass the irq affinity hint from the virtio
> + *                             device driver to vdpa driver (optional).
> + *                             Needed by the interrupt affinity spreading
> + *                             mechanism.
> + *                             @vdev: vdpa device
> + *                             @desc: irq affinity hint
>   * @set_group_asid:            Set address space identifier for a
>   *                             virtqueue group (optional)
>   *                             @vdev: vdpa device
> @@ -344,6 +350,8 @@ struct vdpa_config_ops {
>                                const struct cpumask *cpu_mask);
>         const struct cpumask *(*get_vq_affinity)(struct vdpa_device *vdev,
>                                                  u16 idx);
> +       void (*set_irq_affinity)(struct vdpa_device *vdev,
> +                                struct irq_affinity *desc);
>
>         /* DMA ops */
>         int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
> --
> 2.20.1
>
  
Yongji Xie Dec. 19, 2022, 4:38 a.m. UTC | #2
On Fri, Dec 16, 2022 at 11:58 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Mon, Dec 5, 2022 at 4:43 PM Xie Yongji <xieyongji@bytedance.com> wrote:
> >
> > This introduces set_irq_affinity callback in
> > vdpa_config_ops so that vdpa device driver can
> > get the interrupt affinity hint from the virtio
> > device driver. The interrupt affinity hint would
> > be needed by the interrupt affinity spreading
> > mechanism.
> >
> > Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
> > ---
> >  drivers/virtio/virtio_vdpa.c | 4 ++++
> >  include/linux/vdpa.h         | 8 ++++++++
> >  2 files changed, 12 insertions(+)
> >
> > diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
> > index 08084b49e5a1..4731e4616ee0 100644
> > --- a/drivers/virtio/virtio_vdpa.c
> > +++ b/drivers/virtio/virtio_vdpa.c
> > @@ -275,9 +275,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
> >         struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
> >         struct vdpa_device *vdpa = vd_get_vdpa(vdev);
> >         const struct vdpa_config_ops *ops = vdpa->config;
> > +       struct irq_affinity default_affd = { 0 };
> >         struct vdpa_callback cb;
> >         int i, err, queue_idx = 0;
> >
> > +       if (ops->set_irq_affinity)
> > +               ops->set_irq_affinity(vdpa, desc ? desc : &default_affd);
>
> I wonder if we need to do this in vhost-vDPA.

I don't get why we need to do this in vhost-vDPA. Should this be done in the VM?

> Or it's better to have a
> default affinity by the vDPA parent
>

I think both are OK. But the default value should always be zero, so I
put it in a common place.

> (Looking at virtio-pci, it doesn't do something like this).
>

Yes, but we did something like this in the pci layer:
pci_alloc_irq_vectors_affinity().
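
For comparison, a rough sketch of that PCI-side flow (simplified; names
trimmed and error handling omitted):

#include <linux/pci.h>
#include <linux/interrupt.h>

/*
 * Simplified sketch: virtio-pci forwards the caller's struct
 * irq_affinity to the PCI core, which performs the managed affinity
 * spreading while allocating the MSI-X vectors.
 */
static int example_alloc_vectors(struct pci_dev *pdev, unsigned int nvecs,
				 struct irq_affinity *desc)
{
	return pci_alloc_irq_vectors_affinity(pdev, nvecs, nvecs,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      desc);
}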

Thanks,
Yongji
  
Jason Wang Dec. 19, 2022, 6:06 a.m. UTC | #3
On Mon, Dec 19, 2022 at 12:39 PM Yongji Xie <xieyongji@bytedance.com> wrote:
>
> On Fri, Dec 16, 2022 at 11:58 AM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Mon, Dec 5, 2022 at 4:43 PM Xie Yongji <xieyongji@bytedance.com> wrote:
> > >
> > > This introduces set_irq_affinity callback in
> > > vdpa_config_ops so that vdpa device driver can
> > > get the interrupt affinity hint from the virtio
> > > device driver. The interrupt affinity hint would
> > > be needed by the interrupt affinity spreading
> > > mechanism.
> > >
> > > Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
> > > ---
> > >  drivers/virtio/virtio_vdpa.c | 4 ++++
> > >  include/linux/vdpa.h         | 8 ++++++++
> > >  2 files changed, 12 insertions(+)
> > >
> > > diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
> > > index 08084b49e5a1..4731e4616ee0 100644
> > > --- a/drivers/virtio/virtio_vdpa.c
> > > +++ b/drivers/virtio/virtio_vdpa.c
> > > @@ -275,9 +275,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
> > >         struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
> > >         struct vdpa_device *vdpa = vd_get_vdpa(vdev);
> > >         const struct vdpa_config_ops *ops = vdpa->config;
> > > +       struct irq_affinity default_affd = { 0 };
> > >         struct vdpa_callback cb;
> > >         int i, err, queue_idx = 0;
> > >
> > > +       if (ops->set_irq_affinity)
> > > +               ops->set_irq_affinity(vdpa, desc ? desc : &default_affd);
> >
> > I wonder if we need to do this in vhost-vDPA.
>
> I don't get why we need to do this in vhost-vDPA? Should this be done in VM?

If I'm not wrong, this tries to set the affinity on the host instead of
the guest. More below.

>
> > Or it's better to have a
> > default affinity by the vDPA parent
> >
>
> I think both are OK. But the default value should always be zero, so I
> put it in a common place.

I think we should either:

1) document the zero default value in vdpa.c
2) set the zero default in both vhost-vdpa and virtio-vdpa, or in the vdpa core

>
> > (Looking at virtio-pci, it doesn't do something like this).
> >
>
> Yes, but we did something like this in the pci layer:
> pci_alloc_irq_vectors_affinity().

Ok.

Thanks

>
> Thanks,
> Yongji
>
  
Yongji Xie Dec. 19, 2022, 7:12 a.m. UTC | #4
On Mon, Dec 19, 2022 at 2:06 PM Jason Wang <jasowang@redhat.com> wrote:
>
> On Mon, Dec 19, 2022 at 12:39 PM Yongji Xie <xieyongji@bytedance.com> wrote:
> >
> > On Fri, Dec 16, 2022 at 11:58 AM Jason Wang <jasowang@redhat.com> wrote:
> > >
> > > On Mon, Dec 5, 2022 at 4:43 PM Xie Yongji <xieyongji@bytedance.com> wrote:
> > > >
> > > > This introduces set_irq_affinity callback in
> > > > vdpa_config_ops so that vdpa device driver can
> > > > get the interrupt affinity hint from the virtio
> > > > device driver. The interrupt affinity hint would
> > > > be needed by the interrupt affinity spreading
> > > > mechanism.
> > > >
> > > > Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
> > > > ---
> > > >  drivers/virtio/virtio_vdpa.c | 4 ++++
> > > >  include/linux/vdpa.h         | 8 ++++++++
> > > >  2 files changed, 12 insertions(+)
> > > >
> > > > diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
> > > > index 08084b49e5a1..4731e4616ee0 100644
> > > > --- a/drivers/virtio/virtio_vdpa.c
> > > > +++ b/drivers/virtio/virtio_vdpa.c
> > > > @@ -275,9 +275,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
> > > >         struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
> > > >         struct vdpa_device *vdpa = vd_get_vdpa(vdev);
> > > >         const struct vdpa_config_ops *ops = vdpa->config;
> > > > +       struct irq_affinity default_affd = { 0 };
> > > >         struct vdpa_callback cb;
> > > >         int i, err, queue_idx = 0;
> > > >
> > > > +       if (ops->set_irq_affinity)
> > > > +               ops->set_irq_affinity(vdpa, desc ? desc : &default_affd);
> > >
> > > I wonder if we need to do this in vhost-vDPA.
> >
> > I don't get why we need to do this in vhost-vDPA? Should this be done in VM?
>
> If I was not wrong, this tries to set affinity on the host instead of
> the guest. More below.
>

Yes, it's host-side stuff. This is used by the virtio device driver to
pass the irq affinity hint (telling which irq vectors don't need
affinity management) to the irq affinity manager. In the VM case, it
should only involve the guest's virtio device driver and PCI irq
affinity manager. So I don't get why we need to do this in vhost-vDPA.
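
As a sketch of how a parent could consume such a hint (assumed usage,
not part of this patch):

#include <linux/interrupt.h>

/*
 * The generic spreading code turns the hint into one affinity mask per
 * vector, honoring the pre_vectors/post_vectors that the virtio driver
 * excluded from management. Returns a kcalloc()'d array of nvecs
 * entries, or NULL.
 */
static struct irq_affinity_desc *
example_spread(unsigned int nvecs, struct irq_affinity *affd)
{
	return irq_create_affinity_masks(nvecs, affd);
}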

> >
> > > Or it's better to have a
> > > default affinity by the vDPA parent
> > >
> >
> > I think both are OK. But the default value should always be zero, so I
> > put it in a common place.
>
> I think we should either:
>
> 1) document the zero default value in vdpa.c
> 2) set the zero in both vhost-vdpa and virtio-vdpa, or in the vdpa core
>

Can we call it only in the virtio-vdpa case? That way the vdpa device
driver can know whether it needs to do automatic irq affinity
management or not. In the vhost-vdpa case, we actually don't need
irq affinity management.

Thanks,
Yongji
  
Jason Wang Dec. 20, 2022, 6:31 a.m. UTC | #5
On Mon, Dec 19, 2022 at 3:12 PM Yongji Xie <xieyongji@bytedance.com> wrote:
>
> On Mon, Dec 19, 2022 at 2:06 PM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Mon, Dec 19, 2022 at 12:39 PM Yongji Xie <xieyongji@bytedance.com> wrote:
> > >
> > > On Fri, Dec 16, 2022 at 11:58 AM Jason Wang <jasowang@redhat.com> wrote:
> > > >
> > > > On Mon, Dec 5, 2022 at 4:43 PM Xie Yongji <xieyongji@bytedance.com> wrote:
> > > > >
> > > > > This introduces set_irq_affinity callback in
> > > > > vdpa_config_ops so that vdpa device driver can
> > > > > get the interrupt affinity hint from the virtio
> > > > > device driver. The interrupt affinity hint would
> > > > > be needed by the interrupt affinity spreading
> > > > > mechanism.
> > > > >
> > > > > Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
> > > > > ---
> > > > >  drivers/virtio/virtio_vdpa.c | 4 ++++
> > > > >  include/linux/vdpa.h         | 8 ++++++++
> > > > >  2 files changed, 12 insertions(+)
> > > > >
> > > > > diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
> > > > > index 08084b49e5a1..4731e4616ee0 100644
> > > > > --- a/drivers/virtio/virtio_vdpa.c
> > > > > +++ b/drivers/virtio/virtio_vdpa.c
> > > > > @@ -275,9 +275,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
> > > > >         struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
> > > > >         struct vdpa_device *vdpa = vd_get_vdpa(vdev);
> > > > >         const struct vdpa_config_ops *ops = vdpa->config;
> > > > > +       struct irq_affinity default_affd = { 0 };
> > > > >         struct vdpa_callback cb;
> > > > >         int i, err, queue_idx = 0;
> > > > >
> > > > > +       if (ops->set_irq_affinity)
> > > > > +               ops->set_irq_affinity(vdpa, desc ? desc : &default_affd);
> > > >
> > > > I wonder if we need to do this in vhost-vDPA.
> > >
> > > I don't get why we need to do this in vhost-vDPA? Should this be done in VM?
> >
> > If I was not wrong, this tries to set affinity on the host instead of
> > the guest. More below.
> >
>
> Yes, it's host stuff. This is used by the virtio device driver to pass
> the irq affinity hint (tell which irq vectors don't need affinity
> management) to the irq affinity manager. In the VM case, it should
> only be related to the guest's virtio device driver and pci irq
> affinity manager. So I don't get why we need to do this in vhost-vDPA.

It's not necessarily a VM; do we have the same requirement for
userspace drivers (like DPDK)?

Thanks

>
> > >
> > > > Or it's better to have a
> > > > default affinity by the vDPA parent
> > > >
> > >
> > > I think both are OK. But the default value should always be zero, so I
> > > put it in a common place.
> >
> > I think we should either:
> >
> > 1) document the zero default value in vdpa.c
> > 2) set the zero in both vhost-vdpa and virtio-vdpa, or in the vdpa core
> >
>
> Can we only call it in the virtio-vdpa case? Thus the vdpa device
> driver can know whether it needs to do the automatic irq affinity
> management or not. In the vhost-vdpa case, we actually don't need the
> irq affinity management.
>
> Thanks,
> Yongji
>
  
Yongji Xie Dec. 20, 2022, 10:14 a.m. UTC | #6
On Tue, Dec 20, 2022 at 2:31 PM Jason Wang <jasowang@redhat.com> wrote:
>
> On Mon, Dec 19, 2022 at 3:12 PM Yongji Xie <xieyongji@bytedance.com> wrote:
> >
> > On Mon, Dec 19, 2022 at 2:06 PM Jason Wang <jasowang@redhat.com> wrote:
> > >
> > > On Mon, Dec 19, 2022 at 12:39 PM Yongji Xie <xieyongji@bytedance.com> wrote:
> > > >
> > > > On Fri, Dec 16, 2022 at 11:58 AM Jason Wang <jasowang@redhat.com> wrote:
> > > > >
> > > > > On Mon, Dec 5, 2022 at 4:43 PM Xie Yongji <xieyongji@bytedance.com> wrote:
> > > > > >
> > > > > > This introduces set_irq_affinity callback in
> > > > > > vdpa_config_ops so that vdpa device driver can
> > > > > > get the interrupt affinity hint from the virtio
> > > > > > device driver. The interrupt affinity hint would
> > > > > > be needed by the interrupt affinity spreading
> > > > > > mechanism.
> > > > > >
> > > > > > Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
> > > > > > ---
> > > > > >  drivers/virtio/virtio_vdpa.c | 4 ++++
> > > > > >  include/linux/vdpa.h         | 8 ++++++++
> > > > > >  2 files changed, 12 insertions(+)
> > > > > >
> > > > > > diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
> > > > > > index 08084b49e5a1..4731e4616ee0 100644
> > > > > > --- a/drivers/virtio/virtio_vdpa.c
> > > > > > +++ b/drivers/virtio/virtio_vdpa.c
> > > > > > @@ -275,9 +275,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
> > > > > >         struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
> > > > > >         struct vdpa_device *vdpa = vd_get_vdpa(vdev);
> > > > > >         const struct vdpa_config_ops *ops = vdpa->config;
> > > > > > +       struct irq_affinity default_affd = { 0 };
> > > > > >         struct vdpa_callback cb;
> > > > > >         int i, err, queue_idx = 0;
> > > > > >
> > > > > > +       if (ops->set_irq_affinity)
> > > > > > +               ops->set_irq_affinity(vdpa, desc ? desc : &default_affd);
> > > > >
> > > > > I wonder if we need to do this in vhost-vDPA.
> > > >
> > > > I don't get why we need to do this in vhost-vDPA? Should this be done in VM?
> > >
> > > If I was not wrong, this tries to set affinity on the host instead of
> > > the guest. More below.
> > >
> >
> > Yes, it's host stuff. This is used by the virtio device driver to pass
> > the irq affinity hint (tell which irq vectors don't need affinity
> > management) to the irq affinity manager. In the VM case, it should
> > only be related to the guest's virtio device driver and pci irq
> > affinity manager. So I don't get why we need to do this in vhost-vDPA.
>
> It's not necessarily the VM, do we have the same requirement for
> userspace (like DPDK) drivers?
>

IIUC, vhost-vdpa's irq callback just signals the eventfd; I don't see
how the vdpa device driver would use the irq affinity hint there. The
real irq callback would be invoked inside DPDK itself.
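
For reference, a simplified sketch of that callback path (not the exact
upstream code; eventfd_signal() shown with the two-argument form used at
the time of this thread):

#include <linux/eventfd.h>
#include <linux/interrupt.h>

/*
 * Simplified sketch of vhost-vdpa's per-virtqueue callback: it only
 * signals an eventfd, so host-side affinity spreading of these vectors
 * buys nothing for vhost-vdpa users.
 */
static irqreturn_t example_vq_cb(void *private)
{
	struct eventfd_ctx *call_ctx = private;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}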

Thanks,
Yongji
  
Jason Wang Dec. 21, 2022, 3:20 a.m. UTC | #7
On Tue, Dec 20, 2022 at 6:14 PM Yongji Xie <xieyongji@bytedance.com> wrote:
>
> On Tue, Dec 20, 2022 at 2:31 PM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Mon, Dec 19, 2022 at 3:12 PM Yongji Xie <xieyongji@bytedance.com> wrote:
> > >
> > > On Mon, Dec 19, 2022 at 2:06 PM Jason Wang <jasowang@redhat.com> wrote:
> > > >
> > > > On Mon, Dec 19, 2022 at 12:39 PM Yongji Xie <xieyongji@bytedance.com> wrote:
> > > > >
> > > > > On Fri, Dec 16, 2022 at 11:58 AM Jason Wang <jasowang@redhat.com> wrote:
> > > > > >
> > > > > > On Mon, Dec 5, 2022 at 4:43 PM Xie Yongji <xieyongji@bytedance.com> wrote:
> > > > > > >
> > > > > > > This introduces set_irq_affinity callback in
> > > > > > > vdpa_config_ops so that vdpa device driver can
> > > > > > > get the interrupt affinity hint from the virtio
> > > > > > > device driver. The interrupt affinity hint would
> > > > > > > be needed by the interrupt affinity spreading
> > > > > > > mechanism.
> > > > > > >
> > > > > > > Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
> > > > > > > ---
> > > > > > >  drivers/virtio/virtio_vdpa.c | 4 ++++
> > > > > > >  include/linux/vdpa.h         | 8 ++++++++
> > > > > > >  2 files changed, 12 insertions(+)
> > > > > > >
> > > > > > > diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
> > > > > > > index 08084b49e5a1..4731e4616ee0 100644
> > > > > > > --- a/drivers/virtio/virtio_vdpa.c
> > > > > > > +++ b/drivers/virtio/virtio_vdpa.c
> > > > > > > @@ -275,9 +275,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
> > > > > > >         struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
> > > > > > >         struct vdpa_device *vdpa = vd_get_vdpa(vdev);
> > > > > > >         const struct vdpa_config_ops *ops = vdpa->config;
> > > > > > > +       struct irq_affinity default_affd = { 0 };
> > > > > > >         struct vdpa_callback cb;
> > > > > > >         int i, err, queue_idx = 0;
> > > > > > >
> > > > > > > +       if (ops->set_irq_affinity)
> > > > > > > +               ops->set_irq_affinity(vdpa, desc ? desc : &default_affd);
> > > > > >
> > > > > > I wonder if we need to do this in vhost-vDPA.
> > > > >
> > > > > I don't get why we need to do this in vhost-vDPA? Should this be done in VM?
> > > >
> > > > If I was not wrong, this tries to set affinity on the host instead of
> > > > the guest. More below.
> > > >
> > >
> > > Yes, it's host stuff. This is used by the virtio device driver to pass
> > > the irq affinity hint (tell which irq vectors don't need affinity
> > > management) to the irq affinity manager. In the VM case, it should
> > > only be related to the guest's virtio device driver and pci irq
> > > affinity manager. So I don't get why we need to do this in vhost-vDPA.
> >
> > It's not necessarily the VM, do we have the same requirement for
> > userspace (like DPDK) drivers?
> >
>
> IIUC the vhost-vdpa's irq callback just signals the eventfd. I didn't
> see how to use the irq affinity hint in vdpa device driver. The real
> irq callback should be called in DPDK internally.

I agree.

Thanks

>
> Thanks,
> Yongji
>
  

Patch

diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 08084b49e5a1..4731e4616ee0 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -275,9 +275,13 @@  static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
 	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
 	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
 	const struct vdpa_config_ops *ops = vdpa->config;
+	struct irq_affinity default_affd = { 0 };
 	struct vdpa_callback cb;
 	int i, err, queue_idx = 0;
 
+	if (ops->set_irq_affinity)
+		ops->set_irq_affinity(vdpa, desc ? desc : &default_affd);
+
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 0ff6c9363356..482ff7d0206f 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -256,6 +256,12 @@  struct vdpa_map_file {
  *				@vdev: vdpa device
  *				@idx: virtqueue index
  *				Returns the irq affinity mask
+ * @set_irq_affinity:		Pass the irq affinity hint from the virtio
+ *				device driver to vdpa driver (optional).
+ *				Needed by the interrupt affinity spreading
+ *				mechanism.
+ *				@vdev: vdpa device
+ *				@desc: irq affinity hint
  * @set_group_asid:		Set address space identifier for a
  *				virtqueue group (optional)
  *				@vdev: vdpa device
@@ -344,6 +350,8 @@  struct vdpa_config_ops {
 			       const struct cpumask *cpu_mask);
 	const struct cpumask *(*get_vq_affinity)(struct vdpa_device *vdev,
 						 u16 idx);
+	void (*set_irq_affinity)(struct vdpa_device *vdev,
+				 struct irq_affinity *desc);
 
 	/* DMA ops */
 	int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
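
To make the new op concrete, a hypothetical parent driver could simply
record the hint and apply it when it sets up its per-virtqueue
interrupts (sketch; struct example_parent and its field are invented
for illustration):

#include <linux/vdpa.h>
#include <linux/interrupt.h>

/* Hypothetical parent-driver state, not part of this patch. */
struct example_parent {
	struct vdpa_device vdpa;
	struct irq_affinity irq_affd;	/* latest hint from the virtio layer */
};

static void example_set_irq_affinity(struct vdpa_device *vdev,
				     struct irq_affinity *desc)
{
	struct example_parent *p =
		container_of(vdev, struct example_parent, vdpa);

	/* Stash the hint; applied later when allocating interrupts. */
	p->irq_affd = *desc;
}

static const struct vdpa_config_ops example_ops = {
	.set_irq_affinity = example_set_irq_affinity,
	/* ... other ops elided ... */
};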