[v2,08/11] vduse: Add sysfs interface for irq callback affinity
Commit Message
Add a sysfs interface for each vduse virtqueue to
show the affinity and effective affinity of its irq
callback.

The interface can also be used to change the effective
affinity, which must be a subset of the virtqueue's irq
callback affinity mask. This might be useful for
performance tuning when the irq callback affinity mask
contains more than one CPU.
Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
---
drivers/vdpa/vdpa_user/vduse_dev.c | 148 ++++++++++++++++++++++++++---
1 file changed, 137 insertions(+), 11 deletions(-)
Comments
On Mon, Dec 5, 2022 at 5:03 PM Xie Yongji <xieyongji@bytedance.com> wrote:

[...]

> +static ssize_t irq_cb_effective_affinity_show(struct vduse_virtqueue *vq,
> +					      char *buf)
> +{
> +	struct cpumask all_mask = CPU_MASK_ALL;
> +	const struct cpumask *mask = &all_mask;
> +
> +	if (vq->irq_effective_cpu != -1)
> +		mask = get_cpu_mask(vq->irq_effective_cpu);

Shouldn't this be vq->irq_affinity?

[...]

> +	vq->irq_effective_cpu = cpumask_first(new_value);

Does this mean except for the first cpu, the rest of the mask is unused?

Thanks
On Fri, Dec 16, 2022 at 1:35 PM Jason Wang <jasowang@redhat.com> wrote:

[...]

> > +	if (vq->irq_effective_cpu != -1)
> > +		mask = get_cpu_mask(vq->irq_effective_cpu);
>
> Shouldn't this be vq->irq_affinity?
This sysfs interface is provided for the effective irq affinity rather
than the irq affinity. We created another, read-only sysfs interface
for the irq affinity.
[...]

> > +	vq->irq_effective_cpu = cpumask_first(new_value);
>
> Does this mean except for the first cpu, the rest of the mask is unused?
Yes, but the user should always specify a mask that only contains one
CPU to make it work as expected. This sysfs interface is used to
specify the effective irq affinity rather than the irq affinity.
Thanks,
Yongji
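[For illustration only, not part of the thread: a minimal userspace
sketch of driving the two attributes described above. The
/sys/class/vduse/vduse0/vq0/ path is an assumption inferred from the
kobject_add(..., "vq%d", i) call in the patch, assuming a device
created with the name vduse0; the mask is the hex cpumask format
accepted by cpumask_parse().]

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char mask[256] = { 0 };	/* zeroed so the read stays NUL-terminated */
	int fd;

	/* Read the (read-only) irq callback affinity mask. */
	fd = open("/sys/class/vduse/vduse0/vq0/irq_cb_affinity", O_RDONLY);
	if (fd < 0 || read(fd, mask, sizeof(mask) - 1) <= 0)
		return 1;
	close(fd);
	printf("irq_cb_affinity: %s", mask);

	/* Pin the effective affinity to CPU 2 ("4" is a hex cpumask). */
	fd = open("/sys/class/vduse/vduse0/vq0/irq_cb_effective_affinity",
		  O_WRONLY);
	if (fd < 0 || write(fd, "4", 1) != 1)
		return 1;
	close(fd);
	return 0;
}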
On Mon, Dec 19, 2022 at 1:16 PM Yongji Xie <xieyongji@bytedance.com> wrote:

[...]

> > > +	vq->irq_effective_cpu = cpumask_first(new_value);
> >
> > Does this mean except for the first cpu, the rest of the mask is unused?
>
> Yes, but the user should always specify a mask that only contains one
> CPU to make it work as expected.
This doesn't seem to be the way that the IRQ affinity{hint} exported
via /sys works. Any reason for doing this?

(E.g. we may have the requirement to limit the IRQ/callback to a NUMA
node instead of a specific CPU.)
Thanks
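[Context for the point above: with regular kernel interrupts, the mask
written to /proc/irq/<N>/smp_affinity may contain many CPUs; the kernel
then picks where to actually deliver the interrupt and reports it via
the read-only /proc/irq/<N>/effective_affinity file. The patch inverts
this by making only the effective affinity writable.]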
On Tue, Dec 20, 2022 at 2:29 PM Jason Wang <jasowang@redhat.com> wrote:

[...]

> This doesn't seem to be the way that the IRQ affinity{hint} exported
> via /sys works. Any reason for doing this?
>
> (E.g. we may have the requirement to limit the IRQ/callback to a NUMA
> node instead of a specific CPU.)
Yes, I think we need to make the sysfs interface for irq affinity
writable. The effective irq affinity interface can be removed now,
since we chose to use round-robin to spread IRQs between CPUs.
Thanks,
Yongji
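[A hedged sketch of that round-robin direction, not code from this
series; the function name and the fields mirror the patch but are
illustrative only:]

/*
 * On each IRQ injection, advance to the next CPU in the irq callback
 * affinity mask, wrapping back to the first one at the end.
 */
static void vduse_vq_update_effective_cpu(struct vduse_virtqueue *vq)
{
	unsigned int cpu;

	spin_lock(&vq->irq_affinity_lock);

	/* cpumask_next(-1, mask) returns the first CPU in the mask. */
	cpu = cpumask_next(vq->irq_effective_cpu, &vq->irq_affinity);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(&vq->irq_affinity);
	vq->irq_effective_cpu = cpu;

	spin_unlock(&vq->irq_affinity_lock);
}

[Calling such a helper on every IRQ injection would spread callbacks
across all CPUs in a writable irq_affinity mask, which also covers the
NUMA-node case raised above.]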
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 6507a78abc9d..c65f84100e30 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -61,6 +61,7 @@ struct vduse_virtqueue {
 	int irq_effective_cpu;
 	struct cpumask irq_affinity;
 	spinlock_t irq_affinity_lock;
+	struct kobject kobj;
 };

 struct vduse_dev;
@@ -1419,6 +1420,120 @@ static const struct file_operations vduse_dev_fops = {
 	.llseek = noop_llseek,
 };

+static ssize_t irq_cb_affinity_show(struct vduse_virtqueue *vq, char *buf)
+{
+	return sprintf(buf, "%*pb\n", cpumask_pr_args(&vq->irq_affinity));
+}
+
+static ssize_t irq_cb_effective_affinity_show(struct vduse_virtqueue *vq,
+					      char *buf)
+{
+	struct cpumask all_mask = CPU_MASK_ALL;
+	const struct cpumask *mask = &all_mask;
+
+	if (vq->irq_effective_cpu != -1)
+		mask = get_cpu_mask(vq->irq_effective_cpu);
+
+	return sprintf(buf, "%*pb\n", cpumask_pr_args(mask));
+}
+
+static ssize_t irq_cb_effective_affinity_store(struct vduse_virtqueue *vq,
+					       const char *buf, size_t count)
+{
+	cpumask_var_t new_value;
+	int ret;
+
+	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
+		return -ENOMEM;
+
+	ret = cpumask_parse(buf, new_value);
+	if (ret)
+		goto free_mask;
+
+	ret = -EINVAL;
+	if (!cpumask_intersects(new_value, &vq->irq_affinity))
+		goto free_mask;
+
+	spin_lock(&vq->irq_affinity_lock);
+
+	if (vq->irq_effective_cpu != -1)
+		per_cpu(vduse_allocated_irq, vq->irq_effective_cpu) -= 1;
+
+	vq->irq_effective_cpu = cpumask_first(new_value);
+	per_cpu(vduse_allocated_irq, vq->irq_effective_cpu) += 1;
+
+	spin_unlock(&vq->irq_affinity_lock);
+	ret = count;
+
+free_mask:
+	free_cpumask_var(new_value);
+	return ret;
+}
+
+struct vq_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct vduse_virtqueue *vq, char *buf);
+	ssize_t (*store)(struct vduse_virtqueue *vq, const char *buf,
+			 size_t count);
+};
+
+static struct vq_sysfs_entry irq_cb_affinity_attr = __ATTR_RO(irq_cb_affinity);
+static struct vq_sysfs_entry irq_cb_effective_affinity_attr =
+				__ATTR_RW(irq_cb_effective_affinity);
+
+static struct attribute *vq_attrs[] = {
+	&irq_cb_affinity_attr.attr,
+	&irq_cb_effective_affinity_attr.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vq);
+
+static ssize_t vq_attr_show(struct kobject *kobj, struct attribute *attr,
+			    char *buf)
+{
+	struct vduse_virtqueue *vq = container_of(kobj,
+					struct vduse_virtqueue, kobj);
+	struct vq_sysfs_entry *entry = container_of(attr,
+					struct vq_sysfs_entry, attr);
+
+	if (!entry->show)
+		return -EIO;
+
+	return entry->show(vq, buf);
+}
+
+static ssize_t vq_attr_store(struct kobject *kobj, struct attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct vduse_virtqueue *vq = container_of(kobj,
+					struct vduse_virtqueue, kobj);
+	struct vq_sysfs_entry *entry = container_of(attr,
+					struct vq_sysfs_entry, attr);
+
+	if (!entry->store)
+		return -EIO;
+
+	return entry->store(vq, buf, count);
+}
+
+static const struct sysfs_ops vq_sysfs_ops = {
+	.show = vq_attr_show,
+	.store = vq_attr_store,
+};
+
+static void vq_release(struct kobject *kobj)
+{
+	struct vduse_virtqueue *vq = container_of(kobj,
+					struct vduse_virtqueue, kobj);
+	kfree(vq);
+}
+
+static struct kobj_type vq_type = {
+	.release = vq_release,
+	.sysfs_ops = &vq_sysfs_ops,
+	.default_groups = vq_groups,
+};
+
 static void vduse_dev_deinit_vqs(struct vduse_dev *dev)
 {
 	int i;
@@ -1427,13 +1542,13 @@ static void vduse_dev_deinit_vqs(struct vduse_dev *dev)
 		return;

 	for (i = 0; i < dev->vq_num; i++)
-		kfree(dev->vqs[i]);
+		kobject_put(&dev->vqs[i]->kobj);
 	kfree(dev->vqs);
 }

 static int vduse_dev_init_vqs(struct vduse_dev *dev, u32 vq_align, u32 vq_num)
 {
-	int i;
+	int ret, i;

 	dev->vq_align = vq_align;
 	dev->vq_num = vq_num;
@@ -1443,8 +1558,10 @@ static int vduse_dev_init_vqs(struct vduse_dev *dev, u32 vq_align, u32 vq_num)

 	for (i = 0; i < vq_num; i++) {
 		dev->vqs[i] = kzalloc(sizeof(*dev->vqs[i]), GFP_KERNEL);
-		if (!dev->vqs[i])
+		if (!dev->vqs[i]) {
+			ret = -ENOMEM;
 			goto err;
+		}

 		dev->vqs[i]->index = i;
 		dev->vqs[i]->irq_effective_cpu = -1;
@@ -1454,15 +1571,23 @@ static int vduse_dev_init_vqs(struct vduse_dev *dev, u32 vq_align, u32 vq_num)
 		spin_lock_init(&dev->vqs[i]->irq_lock);
 		spin_lock_init(&dev->vqs[i]->irq_affinity_lock);
 		cpumask_setall(&dev->vqs[i]->irq_affinity);
+
+		kobject_init(&dev->vqs[i]->kobj, &vq_type);
+		ret = kobject_add(&dev->vqs[i]->kobj,
+				  &dev->dev->kobj, "vq%d", i);
+		if (ret) {
+			kfree(dev->vqs[i]);
+			goto err;
+		}
 	}

 	return 0;
 err:
 	while (i--)
-		kfree(dev->vqs[i]);
+		kobject_put(&dev->vqs[i]->kobj);
 	kfree(dev->vqs);
 	dev->vqs = NULL;
-	return -ENOMEM;
+	return ret;
 }

 static struct vduse_dev *vduse_dev_create(void)
@@ -1637,10 +1762,6 @@ static int vduse_create_dev(struct vduse_dev_config *config,
 	dev->config = config_buf;
 	dev->config_size = config->config_size;

-	ret = vduse_dev_init_vqs(dev, config->vq_align, config->vq_num);
-	if (ret)
-		goto err_vqs;
-
 	ret = idr_alloc(&vduse_idr, dev, 1, VDUSE_DEV_MAX, GFP_KERNEL);
 	if (ret < 0)
 		goto err_idr;
@@ -1654,14 +1775,19 @@ static int vduse_create_dev(struct vduse_dev_config *config,
 		ret = PTR_ERR(dev->dev);
 		goto err_dev;
 	}
+
+	ret = vduse_dev_init_vqs(dev, config->vq_align, config->vq_num);
+	if (ret)
+		goto err_vqs;
+
 	__module_get(THIS_MODULE);

 	return 0;
+err_vqs:
+	device_destroy(vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
 err_dev:
 	idr_remove(&vduse_idr, dev->minor);
 err_idr:
-	vduse_dev_deinit_vqs(dev);
-err_vqs:
 	vduse_domain_destroy(dev->domain);
 err_domain:
 	kfree(dev->name);