[1/2] virtio: abstract virtqueue related methods

Message ID 20230512094618.433707-2-pizhenwei@bytedance.com
State New
Headers
Series virtio: abstract virtqueue related methods |

Commit Message

zhenwei pi May 12, 2023, 9:46 a.m. UTC
  There is already an abstract virtqueue structure in the virtio
subsystem (see struct virtqueue in include/linux/virtio.h); however,
only the vring-based virtqueue has been used over the past years, so
the virtqueue-related methods are closely entangled with vring (as the
code shows, the virtqueue_xxx functions are implemented in
virtio_ring.c).

Abstract the virtqueue-related methods (see struct virtqueue_ops) and
separate the virtqueue_xxx symbols from vring. This allows a
non-vring-based transport in the future. With this change, the
following symbols are exported from virtio.ko instead of virtio_ring.ko:
  virtqueue_add_sgs
  virtqueue_add_outbuf
  virtqueue_add_inbuf
  virtqueue_add_inbuf_ctx
  virtqueue_kick_prepare
  virtqueue_notify
  virtqueue_kick
  virtqueue_enable_cb_prepare
  virtqueue_enable_cb
  virtqueue_enable_cb_delayed
  virtqueue_disable_cb
  virtqueue_poll
  virtqueue_get_buf_ctx
  virtqueue_get_buf
  virtqueue_detach_unused_buf
  virtqueue_get_vring_size
  virtqueue_resize
  virtqueue_is_broken
  virtio_break_device
  __virtio_unbreak_device

Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
---
 drivers/virtio/virtio.c      | 362 +++++++++++++++++++++++++++++++++++
 drivers/virtio/virtio_ring.c | 282 +++++----------------------
 include/linux/virtio.h       |  29 +++
 3 files changed, 443 insertions(+), 230 deletions(-)
  

Comments

Michael S. Tsirkin May 12, 2023, 10:46 a.m. UTC | #1
On Fri, May 12, 2023 at 05:46:17PM +0800, zhenwei pi wrote:
> There is already a virtqueue abstract structure in virtio subsystem
> (see struct virtqueue in include/linux/virtio.h), however the vring
> based virtqueue is used only in the past years, the virtqueue related
> methods mix much with vring(just look like the codes, virtqueue_xxx
> functions are implemented in virtio_ring.c).
> 
> Abstract virtqueue related methods(see struct virtqueue_ops), and
> separate virtqueue_xxx symbols from vring. This allows a non-vring
> based transport in the future. With this change, the following symbols
> are exported from virtio.ko instead of virtio_ring.ko:
>   virtqueue_add_sgs
>   virtqueue_add_outbuf
>   virtqueue_add_inbuf
>   virtqueue_add_inbuf_ctx
>   virtqueue_kick_prepare
>   virtqueue_notify
>   virtqueue_kick
>   virtqueue_enable_cb_prepare
>   virtqueue_enable_cb
>   virtqueue_enable_cb_delayed
>   virtqueue_disable_cb
>   virtqueue_poll
>   virtqueue_get_buf_ctx
>   virtqueue_get_buf
>   virtqueue_detach_unused_buf
>   virtqueue_get_vring_size
>   virtqueue_resize
>   virtqueue_is_broken
>   virtio_break_device
>   __virtio_unbreak_device
> 
> Cc: Stefan Hajnoczi <stefanha@redhat.com>
> Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
> ---
>  drivers/virtio/virtio.c      | 362 +++++++++++++++++++++++++++++++++++
>  drivers/virtio/virtio_ring.c | 282 +++++----------------------
>  include/linux/virtio.h       |  29 +++
>  3 files changed, 443 insertions(+), 230 deletions(-)
> 
> diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
> index 3893dc29eb26..8d8715a10f26 100644
> --- a/drivers/virtio/virtio.c
> +++ b/drivers/virtio/virtio.c
> @@ -553,6 +553,368 @@ int virtio_device_restore(struct virtio_device *dev)
>  EXPORT_SYMBOL_GPL(virtio_device_restore);
>  #endif
>  
> +/**
> + * virtqueue_add_sgs - expose buffers to other end
> + * @vq: the struct virtqueue we're talking about.
> + * @sgs: array of terminated scatterlists.
> + * @out_sgs: the number of scatterlists readable by other side
> + * @in_sgs: the number of scatterlists which are writable (after readable ones)
> + * @data: the token identifying the buffer.
> + * @gfp: how to do memory allocations (if necessary).
> + *
> + * Caller must ensure we don't call this with other virtqueue operations
> + * at the same time (except where noted).
> + *
> + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
> + */
> +int virtqueue_add_sgs(struct virtqueue *vq, struct scatterlist *sgs[],
> +		      unsigned int out_sgs, unsigned int in_sgs,
> +		      void *data, gfp_t gfp)
> +{
> +	unsigned int i, total_sg = 0;
> +
> +	/* Count them first. */
> +	for (i = 0; i < out_sgs + in_sgs; i++) {
> +		struct scatterlist *sg;
> +
> +		for (sg = sgs[i]; sg; sg = sg_next(sg))
> +			total_sg++;
> +	}
> +	return vq->ops->add_sgs(vq, sgs, total_sg, out_sgs, in_sgs,
> +				data, NULL, gfp);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_add_sgs);


Hmm this kind of indirection on data path is going to be costly
performance-wise especially when retpolines are in place.

Any data on this?

> +
> +/**
> + * virtqueue_add_outbuf - expose output buffers to other end
> + * @vq: the struct virtqueue we're talking about.
> + * @sg: scatterlist (must be well-formed and terminated!)
> + * @num: the number of entries in @sg readable by other side
> + * @data: the token identifying the buffer.
> + * @gfp: how to do memory allocations (if necessary).
> + *
> + * Caller must ensure we don't call this with other virtqueue operations
> + * at the same time (except where noted).
> + *
> + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
> + */
> +int virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg,
> +			 unsigned int num, void *data, gfp_t gfp)
> +{
> +	return vq->ops->add_sgs(vq, &sg, num, 1, 0, data, NULL, gfp);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
> +
> +/**
> + * virtqueue_add_inbuf - expose input buffers to other end
> + * @vq: the struct virtqueue we're talking about.
> + * @sg: scatterlist (must be well-formed and terminated!)
> + * @num: the number of entries in @sg writable by other side
> + * @data: the token identifying the buffer.
> + * @gfp: how to do memory allocations (if necessary).
> + *
> + * Caller must ensure we don't call this with other virtqueue operations
> + * at the same time (except where noted).
> + *
> + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
> + */
> +int virtqueue_add_inbuf(struct virtqueue *vq, struct scatterlist *sg,
> +			unsigned int num, void *data, gfp_t gfp)
> +{
> +	return vq->ops->add_sgs(vq, &sg, num, 0, 1, data, NULL, gfp);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
> +
> +/**
> + * virtqueue_add_inbuf_ctx - expose input buffers to other end
> + * @vq: the struct virtqueue we're talking about.
> + * @sg: scatterlist (must be well-formed and terminated!)
> + * @num: the number of entries in @sg writable by other side
> + * @data: the token identifying the buffer.
> + * @ctx: extra context for the token
> + * @gfp: how to do memory allocations (if necessary).
> + *
> + * Caller must ensure we don't call this with other virtqueue operations
> + * at the same time (except where noted).
> + *
> + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
> + */
> +int virtqueue_add_inbuf_ctx(struct virtqueue *vq, struct scatterlist *sg,
> +			    unsigned int num, void *data, void *ctx, gfp_t gfp)
> +{
> +	return vq->ops->add_sgs(vq, &sg, num, 0, 1, data, ctx, gfp);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
> +
> +/**
> + * virtqueue_kick_prepare - first half of split virtqueue_kick call.
> + * @vq: the struct virtqueue
> + *
> + * Instead of virtqueue_kick(), you can do:
> + *	if (virtqueue_kick_prepare(vq))
> + *		virtqueue_notify(vq);
> + *
> + * This is sometimes useful because the virtqueue_kick_prepare() needs
> + * to be serialized, but the actual virtqueue_notify() call does not.
> + */
> +bool virtqueue_kick_prepare(struct virtqueue *vq)
> +{
> +	return vq->ops->kick_prepare(vq);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
> +
> +/**
> + * virtqueue_notify - second half of split virtqueue_kick call.
> + * @vq: the struct virtqueue
> + *
> + * This does not need to be serialized.
> + *
> + * Returns false if host notify failed or queue is broken, otherwise true.
> + */
> +bool virtqueue_notify(struct virtqueue *vq)
> +{
> +	return vq->ops->notify(vq);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_notify);
> +
> +/**
> + * virtqueue_kick - update after add_buf
> + * @vq: the struct virtqueue
> + *
> + * After one or more virtqueue_add_* calls, invoke this to kick
> + * the other side.
> + *
> + * Caller must ensure we don't call this with other virtqueue
> + * operations at the same time (except where noted).
> + *
> + * Returns false if kick failed, otherwise true.
> + */
> +bool virtqueue_kick(struct virtqueue *vq)
> +{
> +	if (virtqueue_kick_prepare(vq))
> +		return virtqueue_notify(vq);
> +	return true;
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_kick);
> +
> +/**
> + * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
> + * @vq: the struct virtqueue we're talking about.
> + *
> + * This re-enables callbacks; it returns current queue state
> + * in an opaque unsigned value. This value should be later tested by
> + * virtqueue_poll, to detect a possible race between the driver checking for
> + * more work, and enabling callbacks.
> + *
> + * Caller must ensure we don't call this with other virtqueue
> + * operations at the same time (except where noted).
> + */
> +unsigned int virtqueue_enable_cb_prepare(struct virtqueue *vq)
> +{
> +	return vq->ops->enable_cb_prepare(vq);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
> +
> +/**
> + * virtqueue_enable_cb - restart callbacks after disable_cb.
> + * @vq: the struct virtqueue we're talking about.
> + *
> + * This re-enables callbacks; it returns "false" if there are pending
> + * buffers in the queue, to detect a possible race between the driver
> + * checking for more work, and enabling callbacks.
> + *
> + * Caller must ensure we don't call this with other virtqueue
> + * operations at the same time (except where noted).
> + */
> +bool virtqueue_enable_cb(struct virtqueue *vq)
> +{
> +	unsigned int val = vq->ops->enable_cb_prepare(vq);
> +
> +	return !vq->ops->poll(vq, val);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
> +
> +/**
> + * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
> + * @vq: the struct virtqueue we're talking about.
> + *
> + * This re-enables callbacks but hints to the other side to delay
> + * interrupts until most of the available buffers have been processed;
> + * it returns "false" if there are many pending buffers in the queue,
> + * to detect a possible race between the driver checking for more work,
> + * and enabling callbacks.
> + *
> + * Caller must ensure we don't call this with other virtqueue
> + * operations at the same time (except where noted).
> + */
> +bool virtqueue_enable_cb_delayed(struct virtqueue *vq)
> +{
> +	return vq->ops->enable_cb_delayed(vq);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
> +
> +/**
> + * virtqueue_disable_cb - disable callbacks
> + * @vq: the struct virtqueue we're talking about.
> + *
> + * Note that this is not necessarily synchronous, hence unreliable and only
> + * useful as an optimization.
> + *
> + * Unlike other operations, this need not be serialized.
> + */
> +void virtqueue_disable_cb(struct virtqueue *vq)
> +{
> +	vq->ops->disable_cb(vq);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
> +
> +/**
> + * virtqueue_poll - query pending used buffers
> + * @vq: the struct virtqueue we're talking about.
> + * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
> + *
> + * Returns "true" if there are pending used buffers in the queue.
> + *
> + * This does not need to be serialized.
> + */
> +bool virtqueue_poll(struct virtqueue *vq, unsigned int idx)
> +{
> +	return vq->ops->poll(vq, idx);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_poll);
> +
> +/**
> + * virtqueue_get_buf_ctx - get the next used buffer
> + * @vq: the struct virtqueue we're talking about.
> + * @len: the length written into the buffer
> + * @ctx: extra context for the token
> + *
> + * If the device wrote data into the buffer, @len will be set to the
> + * amount written.  This means you don't need to clear the buffer
> + * beforehand to ensure there's no data leakage in the case of short
> + * writes.
> + *
> + * Caller must ensure we don't call this with other virtqueue
> + * operations at the same time (except where noted).
> + *
> + * Returns NULL if there are no used buffers, or the "data" token
> + * handed to virtqueue_add_*().
> + */
> +void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len,
> +			    void **ctx)
> +{
> +	return vq->ops->get_buf_ctx(vq, len, ctx);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
> +
> +void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
> +{
> +	return vq->ops->get_buf_ctx(vq, len, NULL);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_get_buf);
> +
> +/**
> + * virtqueue_detach_unused_buf - detach first unused buffer
> + * @vq: the struct virtqueue we're talking about.
> + *
> + * Returns NULL or the "data" token handed to virtqueue_add_*().
> + * This is not valid on an active queue; it is useful for device
> + * shutdown or the reset queue.
> + */
> +void *virtqueue_detach_unused_buf(struct virtqueue *vq)
> +{
> +	return vq->ops->detach_unused_buf(vq);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
> +
> +/**
> + * virtqueue_get_vring_size - return the size of the virtqueue's vring
> + * @vq: the struct virtqueue containing the vring of interest.
> + *
> + * Returns the size of the vring.  This is mainly used for boasting to
> + * userspace.  Unlike other operations, this need not be serialized.
> + */
> +unsigned int virtqueue_get_vring_size(const struct virtqueue *vq)
> +{
> +	return vq->ops->get_vring_size(vq);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
> +
> +/**
> + * virtqueue_resize - resize the vring of vq
> + * @vq: the struct virtqueue we're talking about.
> + * @num: new ring num
> + * @recycle: callback for recycle the useless buffer
> + *
> + * When it is really necessary to create a new vring, it will set the current vq
> + * into the reset state. Then call the passed callback to recycle the buffer
> + * that is no longer used. Only after the new vring is successfully created, the
> + * old vring will be released.
> + *
> + * Caller must ensure we don't call this with other virtqueue operations
> + * at the same time (except where noted).
> + *
> + * Returns zero or a negative error.
> + * 0: success.
> + * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size.
> + *  vq can still work normally
> + * -EBUSY: Failed to sync with device, vq may not work properly
> + * -ENOENT: Transport or device not supported
> + * -E2BIG/-EINVAL: num error
> + * -EPERM: Operation not permitted
> + *
> + */
> +int virtqueue_resize(struct virtqueue *vq, u32 num,
> +		     void (*recycle)(struct virtqueue *vq, void *buf))
> +{
> +	if (vq->ops->resize)
> +		return vq->ops->resize(vq, num, recycle);
> +
> +	return -ENOENT;
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_resize);
> +
> +bool virtqueue_is_broken(const struct virtqueue *vq)
> +{
> +	return vq->ops->is_broken(vq);
> +}
> +EXPORT_SYMBOL_GPL(virtqueue_is_broken);
> +
> +/*
> + * This should prevent the device from being used, allowing drivers to
> + * recover.  You may need to grab appropriate locks to flush.
> + */
> +void virtio_break_device(struct virtio_device *dev)
> +{
> +	struct virtqueue *vq;
> +
> +	spin_lock(&dev->vqs_list_lock);
> +	list_for_each_entry(vq, &dev->vqs, list) {
> +		vq->ops->__break(vq);
> +	}
> +	spin_unlock(&dev->vqs_list_lock);
> +}
> +EXPORT_SYMBOL_GPL(virtio_break_device);
> +
> +/*
> + * This should allow the device to be used by the driver. You may
> + * need to grab appropriate locks to flush the write to
> + * vq->broken. This should only be used in some specific case e.g
> + * (probing and restoring). This function should only be called by the
> + * core, not directly by the driver.
> + */
> +void __virtio_unbreak_device(struct virtio_device *dev)
> +{
> +	struct virtqueue *vq;
> +
> +	spin_lock(&dev->vqs_list_lock);
> +	list_for_each_entry(vq, &dev->vqs, list) {
> +		vq->ops->__unbreak(vq);
> +	}
> +	spin_unlock(&dev->vqs_list_lock);
> +}
> +EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
> +
>  static int virtio_init(void)
>  {
>  	if (bus_register(&virtio_bus) != 0)
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index c5310eaf8b46..7b86417255db 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -227,6 +227,8 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
>  static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
>  static void vring_free(struct virtqueue *_vq);
>  
> +static struct virtqueue_ops vring_ops;
> +
>  /*
>   * Helpers.
>   */
> @@ -2041,6 +2043,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
>  	vq->vq.name = name;
>  	vq->vq.index = index;
>  	vq->vq.reset = false;
> +	vq->vq.ops = &vring_ops;
>  	vq->we_own_ring = true;
>  	vq->notify = notify;
>  	vq->weak_barriers = weak_barriers;
> @@ -2114,17 +2117,17 @@ static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
>  
>  
>  /*
> - * Generic functions and exported symbols.
> + * Vring specific operation functions
>   */
>  
> -static inline int virtqueue_add(struct virtqueue *_vq,
> -				struct scatterlist *sgs[],
> -				unsigned int total_sg,
> -				unsigned int out_sgs,
> -				unsigned int in_sgs,
> -				void *data,
> -				void *ctx,
> -				gfp_t gfp)
> +static inline int vring_virtqueue_add_sgs(struct virtqueue *_vq,
> +					  struct scatterlist *sgs[],
> +					  unsigned int total_sg,
> +					  unsigned int out_sgs,
> +					  unsigned int in_sgs,
> +					  void *data,
> +					  void *ctx,
> +					  gfp_t gfp)
>  {
>  	struct vring_virtqueue *vq = to_vvq(_vq);
>  
> @@ -2135,110 +2138,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
>  }
>  
>  /**
> - * virtqueue_add_sgs - expose buffers to other end
> - * @_vq: the struct virtqueue we're talking about.
> - * @sgs: array of terminated scatterlists.
> - * @out_sgs: the number of scatterlists readable by other side
> - * @in_sgs: the number of scatterlists which are writable (after readable ones)
> - * @data: the token identifying the buffer.
> - * @gfp: how to do memory allocations (if necessary).
> - *
> - * Caller must ensure we don't call this with other virtqueue operations
> - * at the same time (except where noted).
> - *
> - * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
> - */
> -int virtqueue_add_sgs(struct virtqueue *_vq,
> -		      struct scatterlist *sgs[],
> -		      unsigned int out_sgs,
> -		      unsigned int in_sgs,
> -		      void *data,
> -		      gfp_t gfp)
> -{
> -	unsigned int i, total_sg = 0;
> -
> -	/* Count them first. */
> -	for (i = 0; i < out_sgs + in_sgs; i++) {
> -		struct scatterlist *sg;
> -
> -		for (sg = sgs[i]; sg; sg = sg_next(sg))
> -			total_sg++;
> -	}
> -	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
> -			     data, NULL, gfp);
> -}
> -EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
> -
> -/**
> - * virtqueue_add_outbuf - expose output buffers to other end
> - * @vq: the struct virtqueue we're talking about.
> - * @sg: scatterlist (must be well-formed and terminated!)
> - * @num: the number of entries in @sg readable by other side
> - * @data: the token identifying the buffer.
> - * @gfp: how to do memory allocations (if necessary).
> - *
> - * Caller must ensure we don't call this with other virtqueue operations
> - * at the same time (except where noted).
> - *
> - * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
> - */
> -int virtqueue_add_outbuf(struct virtqueue *vq,
> -			 struct scatterlist *sg, unsigned int num,
> -			 void *data,
> -			 gfp_t gfp)
> -{
> -	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
> -}
> -EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
> -
> -/**
> - * virtqueue_add_inbuf - expose input buffers to other end
> - * @vq: the struct virtqueue we're talking about.
> - * @sg: scatterlist (must be well-formed and terminated!)
> - * @num: the number of entries in @sg writable by other side
> - * @data: the token identifying the buffer.
> - * @gfp: how to do memory allocations (if necessary).
> - *
> - * Caller must ensure we don't call this with other virtqueue operations
> - * at the same time (except where noted).
> - *
> - * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
> - */
> -int virtqueue_add_inbuf(struct virtqueue *vq,
> -			struct scatterlist *sg, unsigned int num,
> -			void *data,
> -			gfp_t gfp)
> -{
> -	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
> -}
> -EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
> -
> -/**
> - * virtqueue_add_inbuf_ctx - expose input buffers to other end
> - * @vq: the struct virtqueue we're talking about.
> - * @sg: scatterlist (must be well-formed and terminated!)
> - * @num: the number of entries in @sg writable by other side
> - * @data: the token identifying the buffer.
> - * @ctx: extra context for the token
> - * @gfp: how to do memory allocations (if necessary).
> - *
> - * Caller must ensure we don't call this with other virtqueue operations
> - * at the same time (except where noted).
> - *
> - * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
> - */
> -int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
> -			struct scatterlist *sg, unsigned int num,
> -			void *data,
> -			void *ctx,
> -			gfp_t gfp)
> -{
> -	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
> -}
> -EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
> -
> -/**
> - * virtqueue_kick_prepare - first half of split virtqueue_kick call.
> + * vring_virtqueue_kick_prepare - first half of split virtqueue_kick call.
>   * @_vq: the struct virtqueue
>   *
>   * Instead of virtqueue_kick(), you can do:
> @@ -2248,24 +2148,23 @@ EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
>   * This is sometimes useful because the virtqueue_kick_prepare() needs
>   * to be serialized, but the actual virtqueue_notify() call does not.
>   */
> -bool virtqueue_kick_prepare(struct virtqueue *_vq)
> +static bool vring_virtqueue_kick_prepare(struct virtqueue *_vq)
>  {
>  	struct vring_virtqueue *vq = to_vvq(_vq);
>  
>  	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
>  				 virtqueue_kick_prepare_split(_vq);
>  }
> -EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
>  
>  /**
> - * virtqueue_notify - second half of split virtqueue_kick call.
> + * vring_virtqueue_notify - second half of split virtqueue_kick call.
>   * @_vq: the struct virtqueue
>   *
>   * This does not need to be serialized.
>   *
>   * Returns false if host notify failed or queue is broken, otherwise true.
>   */
> -bool virtqueue_notify(struct virtqueue *_vq)
> +static bool vring_virtqueue_notify(struct virtqueue *_vq)
>  {
>  	struct vring_virtqueue *vq = to_vvq(_vq);
>  
> @@ -2279,30 +2178,9 @@ bool virtqueue_notify(struct virtqueue *_vq)
>  	}
>  	return true;
>  }
> -EXPORT_SYMBOL_GPL(virtqueue_notify);
>  
>  /**
> - * virtqueue_kick - update after add_buf
> - * @vq: the struct virtqueue
> - *
> - * After one or more virtqueue_add_* calls, invoke this to kick
> - * the other side.
> - *
> - * Caller must ensure we don't call this with other virtqueue
> - * operations at the same time (except where noted).
> - *
> - * Returns false if kick failed, otherwise true.
> - */
> -bool virtqueue_kick(struct virtqueue *vq)
> -{
> -	if (virtqueue_kick_prepare(vq))
> -		return virtqueue_notify(vq);
> -	return true;
> -}
> -EXPORT_SYMBOL_GPL(virtqueue_kick);
> -
> -/**
> - * virtqueue_get_buf_ctx - get the next used buffer
> + * vring_virtqueue_get_buf_ctx - get the next used buffer
>   * @_vq: the struct virtqueue we're talking about.
>   * @len: the length written into the buffer
>   * @ctx: extra context for the token
> @@ -2318,7 +2196,7 @@ EXPORT_SYMBOL_GPL(virtqueue_kick);
>   * Returns NULL if there are no used buffers, or the "data" token
>   * handed to virtqueue_add_*().
>   */
> -void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
> +static void *vring_virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
>  			    void **ctx)
>  {
>  	struct vring_virtqueue *vq = to_vvq(_vq);
> @@ -2326,15 +2204,9 @@ void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
>  	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
>  				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
>  }
> -EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
>  
> -void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
> -{
> -	return virtqueue_get_buf_ctx(_vq, len, NULL);
> -}
> -EXPORT_SYMBOL_GPL(virtqueue_get_buf);
>  /**
> - * virtqueue_disable_cb - disable callbacks
> + * vring_virtqueue_disable_cb - disable callbacks
>   * @_vq: the struct virtqueue we're talking about.
>   *
>   * Note that this is not necessarily synchronous, hence unreliable and only
> @@ -2342,7 +2214,7 @@ EXPORT_SYMBOL_GPL(virtqueue_get_buf);
>   *
>   * Unlike other operations, this need not be serialized.
>   */
> -void virtqueue_disable_cb(struct virtqueue *_vq)
> +static void vring_virtqueue_disable_cb(struct virtqueue *_vq)
>  {
>  	struct vring_virtqueue *vq = to_vvq(_vq);
>  
> @@ -2351,10 +2223,9 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
>  	else
>  		virtqueue_disable_cb_split(_vq);
>  }
> -EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
>  
>  /**
> - * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
> + * vring_virtqueue_enable_cb_prepare - restart callbacks after disable_cb
>   * @_vq: the struct virtqueue we're talking about.
>   *
>   * This re-enables callbacks; it returns current queue state
> @@ -2365,7 +2236,7 @@ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
>   * Caller must ensure we don't call this with other virtqueue
>   * operations at the same time (except where noted).
>   */
> -unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
> +static unsigned int vring_virtqueue_enable_cb_prepare(struct virtqueue *_vq)
>  {
>  	struct vring_virtqueue *vq = to_vvq(_vq);
>  
> @@ -2375,10 +2246,9 @@ unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
>  	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
>  				 virtqueue_enable_cb_prepare_split(_vq);
>  }
> -EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
>  
>  /**
> - * virtqueue_poll - query pending used buffers
> + * vring_virtqueue_poll - query pending used buffers
>   * @_vq: the struct virtqueue we're talking about.
>   * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
>   *
> @@ -2386,7 +2256,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
>   *
>   * This does not need to be serialized.
>   */
> -bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
> +static bool vring_virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
>  {
>  	struct vring_virtqueue *vq = to_vvq(_vq);
>  
> @@ -2397,29 +2267,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
>  	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
>  				 virtqueue_poll_split(_vq, last_used_idx);
>  }
> -EXPORT_SYMBOL_GPL(virtqueue_poll);
>  
>  /**
> - * virtqueue_enable_cb - restart callbacks after disable_cb.
> - * @_vq: the struct virtqueue we're talking about.
> - *
> - * This re-enables callbacks; it returns "false" if there are pending
> - * buffers in the queue, to detect a possible race between the driver
> - * checking for more work, and enabling callbacks.
> - *
> - * Caller must ensure we don't call this with other virtqueue
> - * operations at the same time (except where noted).
> - */
> -bool virtqueue_enable_cb(struct virtqueue *_vq)
> -{
> -	unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);
> -
> -	return !virtqueue_poll(_vq, last_used_idx);
> -}
> -EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
> -
> -/**
> - * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
> + * vring_virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
>   * @_vq: the struct virtqueue we're talking about.
>   *
>   * This re-enables callbacks but hints to the other side to delay
> @@ -2431,7 +2281,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
>   * Caller must ensure we don't call this with other virtqueue
>   * operations at the same time (except where noted).
>   */
> -bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
> +static bool vring_virtqueue_enable_cb_delayed(struct virtqueue *_vq)
>  {
>  	struct vring_virtqueue *vq = to_vvq(_vq);
>  
> @@ -2441,24 +2291,22 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
>  	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
>  				 virtqueue_enable_cb_delayed_split(_vq);
>  }
> -EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
>  
>  /**
> - * virtqueue_detach_unused_buf - detach first unused buffer
> + * vring_virtqueue_detach_unused_buf - detach first unused buffer
>   * @_vq: the struct virtqueue we're talking about.
>   *
>   * Returns NULL or the "data" token handed to virtqueue_add_*().
>   * This is not valid on an active queue; it is useful for device
>   * shutdown or the reset queue.
>   */
> -void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
> +static void *vring_virtqueue_detach_unused_buf(struct virtqueue *_vq)
>  {
>  	struct vring_virtqueue *vq = to_vvq(_vq);
>  
>  	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
>  				 virtqueue_detach_unused_buf_split(_vq);
>  }
> -EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
>  
>  static inline bool more_used(const struct vring_virtqueue *vq)
>  {
> @@ -2531,6 +2379,7 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
>  	vq->vq.name = name;
>  	vq->vq.index = index;
>  	vq->vq.reset = false;
> +	vq->vq.ops = &vring_ops;
>  	vq->we_own_ring = false;
>  	vq->notify = notify;
>  	vq->weak_barriers = weak_barriers;
> @@ -2616,7 +2465,7 @@ struct virtqueue *vring_create_virtqueue_dma(
>  EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
>  
>  /**
> - * virtqueue_resize - resize the vring of vq
> + * vring_virtqueue_resize - resize the vring of vq
>   * @_vq: the struct virtqueue we're talking about.
>   * @num: new ring num
>   * @recycle: callback for recycle the useless buffer
> @@ -2639,8 +2488,8 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
>   * -EPERM: Operation not permitted
>   *
>   */
> -int virtqueue_resize(struct virtqueue *_vq, u32 num,
> -		     void (*recycle)(struct virtqueue *vq, void *buf))
> +static int vring_virtqueue_resize(struct virtqueue *_vq, u32 num,
> +				  void (*recycle)(struct virtqueue *vq, void *buf))
>  {
>  	struct vring_virtqueue *vq = to_vvq(_vq);
>  	struct virtio_device *vdev = vq->vq.vdev;
> @@ -2669,7 +2518,7 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
>  	if (err)
>  		return err;
>  
> -	while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
> +	while ((buf = vring_virtqueue_detach_unused_buf(_vq)) != NULL)
>  		recycle(_vq, buf);
>  
>  	if (vq->packed_ring)
> @@ -2682,7 +2531,6 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
>  
>  	return err;
>  }
> -EXPORT_SYMBOL_GPL(virtqueue_resize);
>  
>  /* Only available for split ring */
>  struct virtqueue *vring_new_virtqueue(unsigned int index,
> @@ -2809,20 +2657,19 @@ void vring_transport_features(struct virtio_device *vdev)
>  EXPORT_SYMBOL_GPL(vring_transport_features);
>  
>  /**
> - * virtqueue_get_vring_size - return the size of the virtqueue's vring
> + * vring_virtqueue_get_vring_size - return the size of the virtqueue's vring
>   * @_vq: the struct virtqueue containing the vring of interest.
>   *
>   * Returns the size of the vring.  This is mainly used for boasting to
>   * userspace.  Unlike other operations, this need not be serialized.
>   */
> -unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
> +static unsigned int vring_virtqueue_get_vring_size(const struct virtqueue *_vq)
>  {
>  
>  	const struct vring_virtqueue *vq = to_vvq(_vq);
>  
>  	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
>  }
> -EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
>  
>  /*
>   * This function should only be called by the core, not directly by the driver.
> @@ -2848,54 +2695,29 @@ void __virtqueue_unbreak(struct virtqueue *_vq)
>  }
>  EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
>  
> -bool virtqueue_is_broken(const struct virtqueue *_vq)
> +static bool vring_virtqueue_is_broken(const struct virtqueue *_vq)
>  {
>  	const struct vring_virtqueue *vq = to_vvq(_vq);
>  
>  	return READ_ONCE(vq->broken);
>  }
> -EXPORT_SYMBOL_GPL(virtqueue_is_broken);
> -
> -/*
> - * This should prevent the device from being used, allowing drivers to
> - * recover.  You may need to grab appropriate locks to flush.
> - */
> -void virtio_break_device(struct virtio_device *dev)
> -{
> -	struct virtqueue *_vq;
> -
> -	spin_lock(&dev->vqs_list_lock);
> -	list_for_each_entry(_vq, &dev->vqs, list) {
> -		struct vring_virtqueue *vq = to_vvq(_vq);
> -
> -		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
> -		WRITE_ONCE(vq->broken, true);
> -	}
> -	spin_unlock(&dev->vqs_list_lock);
> -}
> -EXPORT_SYMBOL_GPL(virtio_break_device);
> -
> -/*
> - * This should allow the device to be used by the driver. You may
> - * need to grab appropriate locks to flush the write to
> - * vq->broken. This should only be used in some specific case e.g
> - * (probing and restoring). This function should only be called by the
> - * core, not directly by the driver.
> - */
> -void __virtio_unbreak_device(struct virtio_device *dev)
> -{
> -	struct virtqueue *_vq;
> -
> -	spin_lock(&dev->vqs_list_lock);
> -	list_for_each_entry(_vq, &dev->vqs, list) {
> -		struct vring_virtqueue *vq = to_vvq(_vq);
>  
> -		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
> -		WRITE_ONCE(vq->broken, false);
> -	}
> -	spin_unlock(&dev->vqs_list_lock);
> -}
> -EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
> +static struct virtqueue_ops vring_ops = {
> +	.add_sgs = vring_virtqueue_add_sgs,
> +	.kick_prepare = vring_virtqueue_kick_prepare,
> +	.notify = vring_virtqueue_notify,
> +	.enable_cb_prepare = vring_virtqueue_enable_cb_prepare,
> +	.enable_cb_delayed = vring_virtqueue_enable_cb_delayed,
> +	.disable_cb = vring_virtqueue_disable_cb,
> +	.poll = vring_virtqueue_poll,
> +	.get_buf_ctx = vring_virtqueue_get_buf_ctx,
> +	.detach_unused_buf = vring_virtqueue_detach_unused_buf,
> +	.get_vring_size = vring_virtqueue_get_vring_size,
> +	.resize = vring_virtqueue_resize,
> +	.__break = __virtqueue_break,
> +	.__unbreak = __virtqueue_unbreak,
> +	.is_broken = vring_virtqueue_is_broken,
> +};
>  
>  dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
>  {
> diff --git a/include/linux/virtio.h b/include/linux/virtio.h
> index b93238db94e3..845858b8761e 100644
> --- a/include/linux/virtio.h
> +++ b/include/linux/virtio.h
> @@ -10,6 +10,34 @@
>  #include <linux/mod_devicetable.h>
>  #include <linux/gfp.h>
>  
> +struct virtqueue;
> +
> +/**
> + * struct virtqueue_ops - abstract operations for a virtqueue.
> + *
> + * Descriptions of each field see the comments in virtio.c
> + */
> +struct virtqueue_ops {
> +	int (*add_sgs)(struct virtqueue *vq, struct scatterlist *sgs[],
> +		       unsigned int total_sg,
> +		       unsigned int out_sgs, unsigned int in_sgs,
> +		       void *data, void *ctx, gfp_t gfp);
> +	bool (*kick_prepare)(struct virtqueue *vq);
> +	bool (*notify)(struct virtqueue *vq);
> +	unsigned int (*enable_cb_prepare)(struct virtqueue *vq);
> +	bool (*enable_cb_delayed)(struct virtqueue *vq);
> +	void (*disable_cb)(struct virtqueue *vq);
> +	bool (*poll)(struct virtqueue *vq, unsigned int idx);
> +	void *(*get_buf_ctx)(struct virtqueue *vq, unsigned int *len, void **ctx);
> +	void *(*detach_unused_buf)(struct virtqueue *vq);
> +	unsigned int (*get_vring_size)(const struct virtqueue *vq);
> +	int (*resize)(struct virtqueue *vq, u32 num,
> +		      void (*recycle)(struct virtqueue *vq, void *buf));
> +	void (*__break)(struct virtqueue *vq);
> +	void (*__unbreak)(struct virtqueue *vq);
> +	bool (*is_broken)(const struct virtqueue *vq);
> +};
> +
>  /**
>   * struct virtqueue - a queue to register buffers for sending or receiving.
>   * @list: the chain of virtqueues for this device
> @@ -36,6 +64,7 @@ struct virtqueue {
>  	unsigned int num_max;
>  	bool reset;
>  	void *priv;
> +	struct virtqueue_ops *ops;
>  };
>  
>  int virtqueue_add_outbuf(struct virtqueue *vq,
> -- 
> 2.20.1
  
zhenwei pi May 12, 2023, 11:09 a.m. UTC | #2
On 5/12/23 18:46, Michael S. Tsirkin wrote:
> On Fri, May 12, 2023 at 05:46:17PM +0800, zhenwei pi wrote:
>> There is already a virtqueue abstract structure in virtio subsystem
>> (see struct virtqueue in include/linux/virtio.h), however the vring
>> based virtqueue is used only in the past years, the virtqueue related
>> methods mix much with vring(just look like the codes, virtqueue_xxx
>> functions are implemented in virtio_ring.c).
>>
>> Abstract virtqueue related methods(see struct virtqueue_ops), and
>> separate virtqueue_xxx symbols from vring. This allows a non-vring
>> based transport in the future. With this change, the following symbols
>> are exported from virtio.ko instead of virtio_ring.ko:
>>    virtqueue_add_sgs
>>    virtqueue_add_outbuf
>>    virtqueue_add_inbuf
>>    virtqueue_add_inbuf_ctx
>>    virtqueue_kick_prepare
>>    virtqueue_notify
>>    virtqueue_kick
>>    virtqueue_enable_cb_prepare
>>    virtqueue_enable_cb
>>    virtqueue_enable_cb_delayed
>>    virtqueue_disable_cb
>>    virtqueue_poll
>>    virtqueue_get_buf_ctx
>>    virtqueue_get_buf
>>    virtqueue_detach_unused_buf
>>    virtqueue_get_vring_size
>>    virtqueue_resize
>>    virtqueue_is_broken
>>    virtio_break_device
>>    __virtio_unbreak_device
>>
>> Cc: Stefan Hajnoczi <stefanha@redhat.com>
>> Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
>> ---
>>   drivers/virtio/virtio.c      | 362 +++++++++++++++++++++++++++++++++++
>>   drivers/virtio/virtio_ring.c | 282 +++++----------------------
>>   include/linux/virtio.h       |  29 +++
>>   3 files changed, 443 insertions(+), 230 deletions(-)
>>
>> diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
>> index 3893dc29eb26..8d8715a10f26 100644
>> --- a/drivers/virtio/virtio.c
>> +++ b/drivers/virtio/virtio.c
>> @@ -553,6 +553,368 @@ int virtio_device_restore(struct virtio_device *dev)
>>   EXPORT_SYMBOL_GPL(virtio_device_restore);
>>   #endif
>>   
>> +/**
>> + * virtqueue_add_sgs - expose buffers to other end
>> + * @vq: the struct virtqueue we're talking about.
>> + * @sgs: array of terminated scatterlists.
>> + * @out_sgs: the number of scatterlists readable by other side
>> + * @in_sgs: the number of scatterlists which are writable (after readable ones)
>> + * @data: the token identifying the buffer.
>> + * @gfp: how to do memory allocations (if necessary).
>> + *
>> + * Caller must ensure we don't call this with other virtqueue operations
>> + * at the same time (except where noted).
>> + *
>> + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
>> + */
>> +int virtqueue_add_sgs(struct virtqueue *vq, struct scatterlist *sgs[],
>> +		      unsigned int out_sgs, unsigned int in_sgs,
>> +		      void *data, gfp_t gfp)
>> +{
>> +	unsigned int i, total_sg = 0;
>> +
>> +	/* Count them first. */
>> +	for (i = 0; i < out_sgs + in_sgs; i++) {
>> +		struct scatterlist *sg;
>> +
>> +		for (sg = sgs[i]; sg; sg = sg_next(sg))
>> +			total_sg++;
>> +	}
>> +	return vq->ops->add_sgs(vq, sgs, total_sg, out_sgs, in_sgs,
>> +				data, NULL, gfp);
>> +}
>> +EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
> 
> 
> Hmm this kind of indirection on data path is going to be costly
> performance-wise especially when retpolines are in place.
> 
> Any data on this?
> 

Hi,

1, What about moving these functions into virtio.h and declaring them as 
static inline?
2, What about moving method fields into struct virtqueue?

Then we have struct like:
struct virtqueue {
	struct list_head list;
	...
	void *priv;

	/* virtqueue specific operations */
         int (*add_sgs)(struct virtqueue *vq, struct scatterlist *sgs[],
                        unsigned int total_sg,
                        unsigned int out_sgs, unsigned int in_sgs,
                        void *data, void *ctx, gfp_t gfp);
	...
}

and functions like:
static inline int virtqueue_add_sgs(...)
{
         unsigned int i, total_sg = 0;

         /* Count them first. */
         for (i = 0; i < out_sgs + in_sgs; i++) {
                 struct scatterlist *sg;

                 for (sg = sgs[i]; sg; sg = sg_next(sg))
                         total_sg++;
         }
         return vq->add_sgs(vq, sgs, total_sg, out_sgs, in_sgs,
                            data, NULL, gfp);
}

If [1] is acceptable, we can also reduce changes in patch 'tools/virtio: 
implement virtqueue in test'.
  
Michael S. Tsirkin May 12, 2023, 11:35 a.m. UTC | #3
On Fri, May 12, 2023 at 07:09:40PM +0800, zhenwei pi wrote:
> On 5/12/23 18:46, Michael S. Tsirkin wrote:
> > On Fri, May 12, 2023 at 05:46:17PM +0800, zhenwei pi wrote:
> > > There is already a virtqueue abstract structure in virtio subsystem
> > > (see struct virtqueue in include/linux/virtio.h), however the vring
> > > based virtqueue is used only in the past years, the virtqueue related
> > > methods mix much with vring(just look like the codes, virtqueue_xxx
> > > functions are implemented in virtio_ring.c).
> > > 
> > > Abstract virtqueue related methods(see struct virtqueue_ops), and
> > > separate virtqueue_xxx symbols from vring. This allows a non-vring
> > > based transport in the future. With this change, the following symbols
> > > are exported from virtio.ko instead of virtio_ring.ko:
> > >    virtqueue_add_sgs
> > >    virtqueue_add_outbuf
> > >    virtqueue_add_inbuf
> > >    virtqueue_add_inbuf_ctx
> > >    virtqueue_kick_prepare
> > >    virtqueue_notify
> > >    virtqueue_kick
> > >    virtqueue_enable_cb_prepare
> > >    virtqueue_enable_cb
> > >    virtqueue_enable_cb_delayed
> > >    virtqueue_disable_cb
> > >    virtqueue_poll
> > >    virtqueue_get_buf_ctx
> > >    virtqueue_get_buf
> > >    virtqueue_detach_unused_buf
> > >    virtqueue_get_vring_size
> > >    virtqueue_resize
> > >    virtqueue_is_broken
> > >    virtio_break_device
> > >    __virtio_unbreak_device
> > > 
> > > Cc: Stefan Hajnoczi <stefanha@redhat.com>
> > > Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
> > > ---
> > >   drivers/virtio/virtio.c      | 362 +++++++++++++++++++++++++++++++++++
> > >   drivers/virtio/virtio_ring.c | 282 +++++----------------------
> > >   include/linux/virtio.h       |  29 +++
> > >   3 files changed, 443 insertions(+), 230 deletions(-)
> > > 
> > > diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
> > > index 3893dc29eb26..8d8715a10f26 100644
> > > --- a/drivers/virtio/virtio.c
> > > +++ b/drivers/virtio/virtio.c
> > > @@ -553,6 +553,368 @@ int virtio_device_restore(struct virtio_device *dev)
> > >   EXPORT_SYMBOL_GPL(virtio_device_restore);
> > >   #endif
> > > +/**
> > > + * virtqueue_add_sgs - expose buffers to other end
> > > + * @vq: the struct virtqueue we're talking about.
> > > + * @sgs: array of terminated scatterlists.
> > > + * @out_sgs: the number of scatterlists readable by other side
> > > + * @in_sgs: the number of scatterlists which are writable (after readable ones)
> > > + * @data: the token identifying the buffer.
> > > + * @gfp: how to do memory allocations (if necessary).
> > > + *
> > > + * Caller must ensure we don't call this with other virtqueue operations
> > > + * at the same time (except where noted).
> > > + *
> > > + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
> > > + */
> > > +int virtqueue_add_sgs(struct virtqueue *vq, struct scatterlist *sgs[],
> > > +		      unsigned int out_sgs, unsigned int in_sgs,
> > > +		      void *data, gfp_t gfp)
> > > +{
> > > +	unsigned int i, total_sg = 0;
> > > +
> > > +	/* Count them first. */
> > > +	for (i = 0; i < out_sgs + in_sgs; i++) {
> > > +		struct scatterlist *sg;
> > > +
> > > +		for (sg = sgs[i]; sg; sg = sg_next(sg))
> > > +			total_sg++;
> > > +	}
> > > +	return vq->ops->add_sgs(vq, sgs, total_sg, out_sgs, in_sgs,
> > > +				data, NULL, gfp);
> > > +}
> > > +EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
> > 
> > 
> > Hmm this kind of indirection on data path is going to be costly
> > performance-wise especially when retpolines are in place.
> > 
> > Any data on this?
> > 
> 
> Hi,
> 
> 1, What about moving these functions into virtio.h and declare them as
> static inline?

This will do nothing to remove indirection.

> 2, what about moving method fields into struct virtqueue?

This gets rid of one level of indirection but the big problem
is indirect function call due to retpolines, this remains.


> Then we have struct like:
> struct virtqueue {
> 	struct list_head list;
> 	...
> 	void *priv;
> 
> 	/* virtqueue specific operations */
>         int (*add_sgs)(struct virtqueue *vq, struct scatterlist *sgs[],
>                        unsigned int total_sg,
>                        unsigned int out_sgs, unsigned int in_sgs,
>                        void *data, void *ctx, gfp_t gfp);
> 	...
> }
> 
> and functions like:
> static inline int virtqueue_add_sgs(...)
> {
>         unsigned int i, total_sg = 0;
> 
>         /* Count them first. */
>         for (i = 0; i < out_sgs + in_sgs; i++) {
>                 struct scatterlist *sg;
> 
>                 for (sg = sgs[i]; sg; sg = sg_next(sg))
>                         total_sg++;
>         }
>         return vq->add_sgs(vq, sgs, total_sg, out_sgs, in_sgs,
>                            data, NULL, gfp);
> }

Maybe a flag in vq: 
	bool abstract; /* use ops to add/get bufs and kick */
and then
	if (unlikely(vq->abstract))
		 return vq->ops->add_sgs(vq, sgs, total_sg, out_sgs, in_sgs,
				    	 data, NULL, gfp);

transport then just sets the ops if it wants abstract vqs,
and core then skips the vring.


> If [1] is acceptable, we can also reduce changes in patch 'tools/virtio:
> implement virtqueue in test'.

Yea that one shouldn't be there.

> -- 
> zhenwei pi
  
zhenwei pi May 12, 2023, 11:53 a.m. UTC | #4
On 5/12/23 19:35, Michael S. Tsirkin wrote:
> On Fri, May 12, 2023 at 07:09:40PM +0800, zhenwei pi wrote:
>> On 5/12/23 18:46, Michael S. Tsirkin wrote:
>>> On Fri, May 12, 2023 at 05:46:17PM +0800, zhenwei pi wrote:
>>>> There is already a virtqueue abstract structure in virtio subsystem
>>>> (see struct virtqueue in include/linux/virtio.h), however the vring
>>>> based virtqueue is used only in the past years, the virtqueue related
>>>> methods mix much with vring(just look like the codes, virtqueue_xxx
>>>> functions are implemented in virtio_ring.c).
>>>>
>>>> Abstract virtqueue related methods(see struct virtqueue_ops), and
>>>> separate virtqueue_xxx symbols from vring. This allows a non-vring
>>>> based transport in the future. With this change, the following symbols
>>>> are exported from virtio.ko instead of virtio_ring.ko:
>>>>     virtqueue_add_sgs
>>>>     virtqueue_add_outbuf
>>>>     virtqueue_add_inbuf
>>>>     virtqueue_add_inbuf_ctx
>>>>     virtqueue_kick_prepare
>>>>     virtqueue_notify
>>>>     virtqueue_kick
>>>>     virtqueue_enable_cb_prepare
>>>>     virtqueue_enable_cb
>>>>     virtqueue_enable_cb_delayed
>>>>     virtqueue_disable_cb
>>>>     virtqueue_poll
>>>>     virtqueue_get_buf_ctx
>>>>     virtqueue_get_buf
>>>>     virtqueue_detach_unused_buf
>>>>     virtqueue_get_vring_size
>>>>     virtqueue_resize
>>>>     virtqueue_is_broken
>>>>     virtio_break_device
>>>>     __virtio_unbreak_device
>>>>
>>>> Cc: Stefan Hajnoczi <stefanha@redhat.com>
>>>> Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
>>>> ---
>>>>    drivers/virtio/virtio.c      | 362 +++++++++++++++++++++++++++++++++++
>>>>    drivers/virtio/virtio_ring.c | 282 +++++----------------------
>>>>    include/linux/virtio.h       |  29 +++
>>>>    3 files changed, 443 insertions(+), 230 deletions(-)
>>>>
>>>> diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
>>>> index 3893dc29eb26..8d8715a10f26 100644
>>>> --- a/drivers/virtio/virtio.c
>>>> +++ b/drivers/virtio/virtio.c
>>>> @@ -553,6 +553,368 @@ int virtio_device_restore(struct virtio_device *dev)
>>>>    EXPORT_SYMBOL_GPL(virtio_device_restore);
>>>>    #endif
>>>> +/**
>>>> + * virtqueue_add_sgs - expose buffers to other end
>>>> + * @vq: the struct virtqueue we're talking about.
>>>> + * @sgs: array of terminated scatterlists.
>>>> + * @out_sgs: the number of scatterlists readable by other side
>>>> + * @in_sgs: the number of scatterlists which are writable (after readable ones)
>>>> + * @data: the token identifying the buffer.
>>>> + * @gfp: how to do memory allocations (if necessary).
>>>> + *
>>>> + * Caller must ensure we don't call this with other virtqueue operations
>>>> + * at the same time (except where noted).
>>>> + *
>>>> + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
>>>> + */
>>>> +int virtqueue_add_sgs(struct virtqueue *vq, struct scatterlist *sgs[],
>>>> +		      unsigned int out_sgs, unsigned int in_sgs,
>>>> +		      void *data, gfp_t gfp)
>>>> +{
>>>> +	unsigned int i, total_sg = 0;
>>>> +
>>>> +	/* Count them first. */
>>>> +	for (i = 0; i < out_sgs + in_sgs; i++) {
>>>> +		struct scatterlist *sg;
>>>> +
>>>> +		for (sg = sgs[i]; sg; sg = sg_next(sg))
>>>> +			total_sg++;
>>>> +	}
>>>> +	return vq->ops->add_sgs(vq, sgs, total_sg, out_sgs, in_sgs,
>>>> +				data, NULL, gfp);
>>>> +}
>>>> +EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
>>>
>>>
>>> Hmm this kind of indirection on data path is going to be costly
>>> performance-wise especially when retpolines are in place.
>>>
>>> Any data on this?
>>>
>>
>> Hi,
>>
>> 1, What about moving these functions into virtio.h and declare them as
>> static inline?
> 
> This will do nothing to remove indirection.
> 
>> 2, what about moving method fields into struct virtqueue?
> 
> This gets rid of one level of indirection but the big problem
> is indirect function call due to retpolines, this remains.
> 
> 
>> Then we have struct like:
>> struct virtqueue {
>> 	struct list_head list;
>> 	...
>> 	void *priv;
>>
>> 	/* virtqueue specific operations */
>>          int (*add_sgs)(struct virtqueue *vq, struct scatterlist *sgs[],
>>                         unsigned int total_sg,
>>                         unsigned int out_sgs, unsigned int in_sgs,
>>                         void *data, void *ctx, gfp_t gfp);
>> 	...
>> }
>>
>> and functions like:
>> static inline int virtqueue_add_sgs(...)
>> {
>>          unsigned int i, total_sg = 0;
>>
>>          /* Count them first. */
>>          for (i = 0; i < out_sgs + in_sgs; i++) {
>>                  struct scatterlist *sg;
>>
>>                  for (sg = sgs[i]; sg; sg = sg_next(sg))
>>                          total_sg++;
>>          }
>>          return vq->add_sgs(vq, sgs, total_sg, out_sgs, in_sgs,
>>                             data, NULL, gfp);
>> }
> 
> Maybe a flag in vq:
> 	bool abstract; /* use ops to add/get bufs and kick */
> and then
> 	if (unlikely(vq->abstract))
> 		 return vq->ops->add_sgs(vq, sgs, total_sg, out_sgs, in_sgs,
> 				    	 data, NULL, gfp);
> 
> transport then just sets the ops if it wants abstract vqs,
> and core then skips the vring.
> 
> 
>> If [1] is acceptable, we can also reduce changes in patch 'tools/virtio:
>> implement virtqueue in test'.
> 
> Yea that one shouldn't be there.
> 
>> -- 
>> zhenwei pi
> 

OK, I'll try to send a new version in a few days. Thanks!
  
kernel test robot May 12, 2023, 4:40 p.m. UTC | #5
Hi zhenwei,

kernel test robot noticed the following build errors:

[auto build test ERROR on mst-vhost/linux-next]
[also build test ERROR on linus/master v6.4-rc1 next-20230512]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/zhenwei-pi/virtio-abstract-virtqueue-related-methods/20230512-174928
base:   https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git linux-next
patch link:    https://lore.kernel.org/r/20230512094618.433707-2-pizhenwei%40bytedance.com
patch subject: [PATCH 1/2] virtio: abstract virtqueue related methods
config: loongarch-allyesconfig (https://download.01.org/0day-ci/archive/20230513/202305130012.LQ2KTO5C-lkp@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/372bc1a0371968752fe0f5ec6e81edee6f9c44dd
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review zhenwei-pi/virtio-abstract-virtqueue-related-methods/20230512-174928
        git checkout 372bc1a0371968752fe0f5ec6e81edee6f9c44dd
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=loongarch olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=loongarch SHELL=/bin/bash drivers/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Link: https://lore.kernel.org/oe-kbuild-all/202305130012.LQ2KTO5C-lkp@intel.com/

All errors (new ones prefixed by >>):

   drivers/virtio/virtio.c: In function 'virtio_break_device':
>> drivers/virtio/virtio.c:893:24: error: 'struct virtqueue_ops' has no member named '__builtin_loongarch_break'
     893 |                 vq->ops->__break(vq);
         |                        ^~


vim +893 drivers/virtio/virtio.c

   882	
   883	/*
   884	 * This should prevent the device from being used, allowing drivers to
   885	 * recover.  You may need to grab appropriate locks to flush.
   886	 */
   887	void virtio_break_device(struct virtio_device *dev)
   888	{
   889		struct virtqueue *vq;
   890	
   891		spin_lock(&dev->vqs_list_lock);
   892		list_for_each_entry(vq, &dev->vqs, list) {
 > 893			vq->ops->__break(vq);
   894		}
   895		spin_unlock(&dev->vqs_list_lock);
   896	}
   897	EXPORT_SYMBOL_GPL(virtio_break_device);
   898
  
kernel test robot May 13, 2023, 5:22 p.m. UTC | #6
Hi zhenwei,

kernel test robot noticed the following build warnings:

[auto build test WARNING on mst-vhost/linux-next]
[also build test WARNING on linus/master v6.4-rc1 next-20230512]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/zhenwei-pi/virtio-abstract-virtqueue-related-methods/20230512-174928
base:   https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git linux-next
patch link:    https://lore.kernel.org/r/20230512094618.433707-2-pizhenwei%40bytedance.com
patch subject: [PATCH 1/2] virtio: abstract virtqueue related methods
reproduce:
        # https://github.com/intel-lab-lkp/linux/commit/372bc1a0371968752fe0f5ec6e81edee6f9c44dd
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review zhenwei-pi/virtio-abstract-virtqueue-related-methods/20230512-174928
        git checkout 372bc1a0371968752fe0f5ec6e81edee6f9c44dd
        make menuconfig
        # enable CONFIG_COMPILE_TEST, CONFIG_WARN_MISSING_DOCUMENTS, CONFIG_WARN_ABI_ERRORS
        make htmldocs

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Link: https://lore.kernel.org/oe-kbuild-all/202305140142.c0QQq9wZ-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> ./drivers/virtio/virtio_ring.c:1: warning: 'virtqueue_add_inbuf' not found
>> ./drivers/virtio/virtio_ring.c:1: warning: 'virtqueue_add_outbuf' not found
>> ./drivers/virtio/virtio_ring.c:1: warning: 'virtqueue_add_sgs' not found
>> ./drivers/virtio/virtio_ring.c:1: warning: 'virtqueue_get_buf_ctx' not found
>> ./drivers/virtio/virtio_ring.c:1: warning: 'virtqueue_disable_cb' not found
>> ./drivers/virtio/virtio_ring.c:1: warning: 'virtqueue_enable_cb' not found

vim +/virtqueue_add_inbuf +1 ./drivers/virtio/virtio_ring.c

fd534e9b5fdcf9 Thomas Gleixner     2019-05-23  @1  // SPDX-License-Identifier: GPL-2.0-or-later
0a8a69dd77ddbd Rusty Russell       2007-10-22   2  /* Virtio ring implementation.
0a8a69dd77ddbd Rusty Russell       2007-10-22   3   *
0a8a69dd77ddbd Rusty Russell       2007-10-22   4   *  Copyright 2007 Rusty Russell IBM Corporation
0a8a69dd77ddbd Rusty Russell       2007-10-22   5   */
0a8a69dd77ddbd Rusty Russell       2007-10-22   6  #include <linux/virtio.h>
0a8a69dd77ddbd Rusty Russell       2007-10-22   7  #include <linux/virtio_ring.h>
e34f87256794b8 Rusty Russell       2008-07-25   8  #include <linux/virtio_config.h>
0a8a69dd77ddbd Rusty Russell       2007-10-22   9  #include <linux/device.h>
5a0e3ad6af8660 Tejun Heo           2010-03-24  10  #include <linux/slab.h>
b5a2c4f1996d1d Paul Gortmaker      2011-07-03  11  #include <linux/module.h>
e93300b1afc7cd Rusty Russell       2012-01-12  12  #include <linux/hrtimer.h>
780bc7903a32ed Andy Lutomirski     2016-02-02  13  #include <linux/dma-mapping.h>
88938359e2dfe1 Alexander Potapenko 2022-09-15  14  #include <linux/kmsan.h>
f8ce72632fa7ed Michael S. Tsirkin  2021-08-10  15  #include <linux/spinlock.h>
78fe39872378b0 Andy Lutomirski     2016-02-02  16  #include <xen/xen.h>
0a8a69dd77ddbd Rusty Russell       2007-10-22  17
  

Patch

diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 3893dc29eb26..8d8715a10f26 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -553,6 +553,368 @@  int virtio_device_restore(struct virtio_device *dev)
 EXPORT_SYMBOL_GPL(virtio_device_restore);
 #endif
 
+/**
+ * virtqueue_add_sgs - expose buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sgs: array of terminated scatterlists.
+ * @out_sgs: the number of scatterlists readable by other side
+ * @in_sgs: the number of scatterlists which are writable (after readable ones)
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+int virtqueue_add_sgs(struct virtqueue *vq, struct scatterlist *sgs[],
+		      unsigned int out_sgs, unsigned int in_sgs,
+		      void *data, gfp_t gfp)
+{
+	unsigned int i, total_sg = 0;
+
+	/* Count them first. */
+	for (i = 0; i < out_sgs + in_sgs; i++) {
+		struct scatterlist *sg;
+
+		for (sg = sgs[i]; sg; sg = sg_next(sg))
+			total_sg++;
+	}
+	return vq->ops->add_sgs(vq, sgs, total_sg, out_sgs, in_sgs,
+				data, NULL, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
+
+/**
+ * virtqueue_add_outbuf - expose output buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg readable by other side
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+int virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg,
+			 unsigned int num, void *data, gfp_t gfp)
+{
+	return vq->ops->add_sgs(vq, &sg, num, 1, 0, data, NULL, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
+
+/**
+ * virtqueue_add_inbuf - expose input buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg writable by other side
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+int virtqueue_add_inbuf(struct virtqueue *vq, struct scatterlist *sg,
+			unsigned int num, void *data, gfp_t gfp)
+{
+	return vq->ops->add_sgs(vq, &sg, num, 0, 1, data, NULL, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
+
+/**
+ * virtqueue_add_inbuf_ctx - expose input buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg writable by other side
+ * @data: the token identifying the buffer.
+ * @ctx: extra context for the token
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+int virtqueue_add_inbuf_ctx(struct virtqueue *vq, struct scatterlist *sg,
+			    unsigned int num, void *data, void *ctx, gfp_t gfp)
+{
+	return vq->ops->add_sgs(vq, &sg, num, 0, 1, data, ctx, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
+
+/**
+ * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * Instead of virtqueue_kick(), you can do:
+ *	if (virtqueue_kick_prepare(vq))
+ *		virtqueue_notify(vq);
+ *
+ * This is sometimes useful because the virtqueue_kick_prepare() needs
+ * to be serialized, but the actual virtqueue_notify() call does not.
+ */
+bool virtqueue_kick_prepare(struct virtqueue *vq)
+{
+	return vq->ops->kick_prepare(vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
+
+/**
+ * virtqueue_notify - second half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * This does not need to be serialized.
+ *
+ * Returns false if host notify failed or queue is broken, otherwise true.
+ */
+bool virtqueue_notify(struct virtqueue *vq)
+{
+	return vq->ops->notify(vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_notify);
+
+/**
+ * virtqueue_kick - update after add_buf
+ * @vq: the struct virtqueue
+ *
+ * After one or more virtqueue_add_* calls, invoke this to kick
+ * the other side.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ *
+ * Returns false if kick failed, otherwise true.
+ */
+bool virtqueue_kick(struct virtqueue *vq)
+{
+	if (virtqueue_kick_prepare(vq))
+		return virtqueue_notify(vq);
+	return true;
+}
+EXPORT_SYMBOL_GPL(virtqueue_kick);
+
+/**
+ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns current queue state
+ * in an opaque unsigned value. This value should be later tested by
+ * virtqueue_poll, to detect a possible race between the driver checking for
+ * more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+unsigned int virtqueue_enable_cb_prepare(struct virtqueue *vq)
+{
+	return vq->ops->enable_cb_prepare(vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
+
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+bool virtqueue_enable_cb(struct virtqueue *vq)
+{
+	unsigned int val = vq->ops->enable_cb_prepare(vq);
+
+	return !vq->ops->poll(vq, val);
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
+
+/**
+ * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks but hints to the other side to delay
+ * interrupts until most of the available buffers have been processed;
+ * it returns "false" if there are many pending buffers in the queue,
+ * to detect a possible race between the driver checking for more work,
+ * and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+bool virtqueue_enable_cb_delayed(struct virtqueue *vq)
+{
+	return vq->ops->enable_cb_delayed(vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
+
+/**
+ * virtqueue_disable_cb - disable callbacks
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Note that this is not necessarily synchronous, hence unreliable and only
+ * useful as an optimization.
+ *
+ * Unlike other operations, this need not be serialized.
+ */
+void virtqueue_disable_cb(struct virtqueue *vq)
+{
+	vq->ops->disable_cb(vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
+
+/**
+ * virtqueue_poll - query pending used buffers
+ * @vq: the struct virtqueue we're talking about.
+ * @idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
+ *
+ * Returns "true" if there are pending used buffers in the queue.
+ *
+ * This does not need to be serialized.
+ */
+bool virtqueue_poll(struct virtqueue *vq, unsigned int idx)
+{
+	return vq->ops->poll(vq, idx);
+}
+EXPORT_SYMBOL_GPL(virtqueue_poll);
+
+/**
+ * virtqueue_get_buf_ctx - get the next used buffer
+ * @vq: the struct virtqueue we're talking about.
+ * @len: the length written into the buffer
+ * @ctx: extra context for the token
+ *
+ * If the device wrote data into the buffer, @len will be set to the
+ * amount written.  This means you don't need to clear the buffer
+ * beforehand to ensure there's no data leakage in the case of short
+ * writes.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ *
+ * Returns NULL if there are no used buffers, or the "data" token
+ * handed to virtqueue_add_*().
+ */
+void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len,
+			    void **ctx)
+{
+	return vq->ops->get_buf_ctx(vq, len, ctx);
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
+
+void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
+{
+	return vq->ops->get_buf_ctx(vq, len, NULL);
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_buf);
+
+/**
+ * virtqueue_detach_unused_buf - detach first unused buffer
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Returns NULL or the "data" token handed to virtqueue_add_*().
+ * This is not valid on an active queue; it is useful for device
+ * shutdown or the reset queue.
+ */
+void *virtqueue_detach_unused_buf(struct virtqueue *vq)
+{
+	return vq->ops->detach_unused_buf(vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
+
+/**
+ * virtqueue_get_vring_size - return the size of the virtqueue's vring
+ * @vq: the struct virtqueue containing the vring of interest.
+ *
+ * Returns the size of the vring.  This is mainly used for boasting to
+ * userspace.  Unlike other operations, this need not be serialized.
+ */
+unsigned int virtqueue_get_vring_size(const struct virtqueue *vq)
+{
+	return vq->ops->get_vring_size(vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
+
+/**
+ * virtqueue_resize - resize the vring of vq
+ * @vq: the struct virtqueue we're talking about.
+ * @num: new ring num
+ * @recycle: callback to recycle buffers that are no longer used
+ *
+ * When it is really necessary to create a new vring, it will set the current vq
+ * into the reset state. Then call the passed callback to recycle the buffer
+ * that is no longer used. Only after the new vring is successfully created, the
+ * old vring will be released.
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size.
+ *  vq can still work normally
+ * -EBUSY: Failed to sync with device, vq may not work properly
+ * -ENOENT: Transport or device not supported
+ * -E2BIG/-EINVAL: num error
+ * -EPERM: Operation not permitted
+ *
+ */
+int virtqueue_resize(struct virtqueue *vq, u32 num,
+		     void (*recycle)(struct virtqueue *vq, void *buf))
+{
+	if (vq->ops->resize)
+		return vq->ops->resize(vq, num, recycle);
+
+	return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(virtqueue_resize);
+
+bool virtqueue_is_broken(const struct virtqueue *vq)
+{
+	return vq->ops->is_broken(vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_is_broken);
+
+/*
+ * This should prevent the device from being used, allowing drivers to
+ * recover.  You may need to grab appropriate locks to flush.
+ */
+void virtio_break_device(struct virtio_device *dev)
+{
+	struct virtqueue *vq;
+
+	spin_lock(&dev->vqs_list_lock);
+	list_for_each_entry(vq, &dev->vqs, list) {
+		vq->ops->__break(vq);
+	}
+	spin_unlock(&dev->vqs_list_lock);
+}
+EXPORT_SYMBOL_GPL(virtio_break_device);
+
+/*
+ * This should allow the device to be used by the driver. You may
+ * need to grab appropriate locks to flush the write to
+ * vq->broken. This should only be used in some specific case e.g
+ * (probing and restoring). This function should only be called by the
+ * core, not directly by the driver.
+ */
+void __virtio_unbreak_device(struct virtio_device *dev)
+{
+	struct virtqueue *vq;
+
+	spin_lock(&dev->vqs_list_lock);
+	list_for_each_entry(vq, &dev->vqs, list) {
+		vq->ops->__unbreak(vq);
+	}
+	spin_unlock(&dev->vqs_list_lock);
+}
+EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
+
 static int virtio_init(void)
 {
 	if (bus_register(&virtio_bus) != 0)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index c5310eaf8b46..7b86417255db 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -227,6 +227,8 @@  static struct virtqueue *__vring_new_virtqueue(unsigned int index,
 static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
 static void vring_free(struct virtqueue *_vq);
 
+static struct virtqueue_ops vring_ops;
+
 /*
  * Helpers.
  */
@@ -2041,6 +2043,7 @@  static struct virtqueue *vring_create_virtqueue_packed(
 	vq->vq.name = name;
 	vq->vq.index = index;
 	vq->vq.reset = false;
+	vq->vq.ops = &vring_ops;
 	vq->we_own_ring = true;
 	vq->notify = notify;
 	vq->weak_barriers = weak_barriers;
@@ -2114,17 +2117,17 @@  static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
 
 
 /*
- * Generic functions and exported symbols.
+ * Vring specific operation functions
  */
 
-static inline int virtqueue_add(struct virtqueue *_vq,
-				struct scatterlist *sgs[],
-				unsigned int total_sg,
-				unsigned int out_sgs,
-				unsigned int in_sgs,
-				void *data,
-				void *ctx,
-				gfp_t gfp)
+static inline int vring_virtqueue_add_sgs(struct virtqueue *_vq,
+					  struct scatterlist *sgs[],
+					  unsigned int total_sg,
+					  unsigned int out_sgs,
+					  unsigned int in_sgs,
+					  void *data,
+					  void *ctx,
+					  gfp_t gfp)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2135,110 +2138,7 @@  static inline int virtqueue_add(struct virtqueue *_vq,
 }
 
 /**
- * virtqueue_add_sgs - expose buffers to other end
- * @_vq: the struct virtqueue we're talking about.
- * @sgs: array of terminated scatterlists.
- * @out_sgs: the number of scatterlists readable by other side
- * @in_sgs: the number of scatterlists which are writable (after readable ones)
- * @data: the token identifying the buffer.
- * @gfp: how to do memory allocations (if necessary).
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
- */
-int virtqueue_add_sgs(struct virtqueue *_vq,
-		      struct scatterlist *sgs[],
-		      unsigned int out_sgs,
-		      unsigned int in_sgs,
-		      void *data,
-		      gfp_t gfp)
-{
-	unsigned int i, total_sg = 0;
-
-	/* Count them first. */
-	for (i = 0; i < out_sgs + in_sgs; i++) {
-		struct scatterlist *sg;
-
-		for (sg = sgs[i]; sg; sg = sg_next(sg))
-			total_sg++;
-	}
-	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
-			     data, NULL, gfp);
-}
-EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
-
-/**
- * virtqueue_add_outbuf - expose output buffers to other end
- * @vq: the struct virtqueue we're talking about.
- * @sg: scatterlist (must be well-formed and terminated!)
- * @num: the number of entries in @sg readable by other side
- * @data: the token identifying the buffer.
- * @gfp: how to do memory allocations (if necessary).
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
- */
-int virtqueue_add_outbuf(struct virtqueue *vq,
-			 struct scatterlist *sg, unsigned int num,
-			 void *data,
-			 gfp_t gfp)
-{
-	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
-}
-EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
-
-/**
- * virtqueue_add_inbuf - expose input buffers to other end
- * @vq: the struct virtqueue we're talking about.
- * @sg: scatterlist (must be well-formed and terminated!)
- * @num: the number of entries in @sg writable by other side
- * @data: the token identifying the buffer.
- * @gfp: how to do memory allocations (if necessary).
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
- */
-int virtqueue_add_inbuf(struct virtqueue *vq,
-			struct scatterlist *sg, unsigned int num,
-			void *data,
-			gfp_t gfp)
-{
-	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
-}
-EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
-
-/**
- * virtqueue_add_inbuf_ctx - expose input buffers to other end
- * @vq: the struct virtqueue we're talking about.
- * @sg: scatterlist (must be well-formed and terminated!)
- * @num: the number of entries in @sg writable by other side
- * @data: the token identifying the buffer.
- * @ctx: extra context for the token
- * @gfp: how to do memory allocations (if necessary).
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
- */
-int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
-			struct scatterlist *sg, unsigned int num,
-			void *data,
-			void *ctx,
-			gfp_t gfp)
-{
-	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
-}
-EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
-
-/**
- * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * vring_virtqueue_kick_prepare - first half of split virtqueue_kick call.
  * @_vq: the struct virtqueue
  *
  * Instead of virtqueue_kick(), you can do:
@@ -2248,24 +2148,23 @@  EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
  * This is sometimes useful because the virtqueue_kick_prepare() needs
  * to be serialized, but the actual virtqueue_notify() call does not.
  */
-bool virtqueue_kick_prepare(struct virtqueue *_vq)
+static bool vring_virtqueue_kick_prepare(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
 	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
 				 virtqueue_kick_prepare_split(_vq);
 }
-EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
 
 /**
- * virtqueue_notify - second half of split virtqueue_kick call.
+ * vring_virtqueue_notify - second half of split virtqueue_kick call.
  * @_vq: the struct virtqueue
  *
  * This does not need to be serialized.
  *
  * Returns false if host notify failed or queue is broken, otherwise true.
  */
-bool virtqueue_notify(struct virtqueue *_vq)
+static bool vring_virtqueue_notify(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2279,30 +2178,9 @@  bool virtqueue_notify(struct virtqueue *_vq)
 	}
 	return true;
 }
-EXPORT_SYMBOL_GPL(virtqueue_notify);
 
 /**
- * virtqueue_kick - update after add_buf
- * @vq: the struct virtqueue
- *
- * After one or more virtqueue_add_* calls, invoke this to kick
- * the other side.
- *
- * Caller must ensure we don't call this with other virtqueue
- * operations at the same time (except where noted).
- *
- * Returns false if kick failed, otherwise true.
- */
-bool virtqueue_kick(struct virtqueue *vq)
-{
-	if (virtqueue_kick_prepare(vq))
-		return virtqueue_notify(vq);
-	return true;
-}
-EXPORT_SYMBOL_GPL(virtqueue_kick);
-
-/**
- * virtqueue_get_buf_ctx - get the next used buffer
+ * vring_virtqueue_get_buf_ctx - get the next used buffer
  * @_vq: the struct virtqueue we're talking about.
  * @len: the length written into the buffer
  * @ctx: extra context for the token
@@ -2318,7 +2196,7 @@  EXPORT_SYMBOL_GPL(virtqueue_kick);
  * Returns NULL if there are no used buffers, or the "data" token
  * handed to virtqueue_add_*().
  */
-void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
+static void *vring_virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
 			    void **ctx)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
@@ -2326,15 +2204,9 @@  void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
 	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
 				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
 }
-EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
 
-void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
-{
-	return virtqueue_get_buf_ctx(_vq, len, NULL);
-}
-EXPORT_SYMBOL_GPL(virtqueue_get_buf);
 /**
- * virtqueue_disable_cb - disable callbacks
+ * vring_virtqueue_disable_cb - disable callbacks
  * @_vq: the struct virtqueue we're talking about.
  *
  * Note that this is not necessarily synchronous, hence unreliable and only
@@ -2342,7 +2214,7 @@  EXPORT_SYMBOL_GPL(virtqueue_get_buf);
  *
  * Unlike other operations, this need not be serialized.
  */
-void virtqueue_disable_cb(struct virtqueue *_vq)
+static void vring_virtqueue_disable_cb(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2351,10 +2223,9 @@  void virtqueue_disable_cb(struct virtqueue *_vq)
 	else
 		virtqueue_disable_cb_split(_vq);
 }
-EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
 /**
- * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
+ * vring_virtqueue_enable_cb_prepare - restart callbacks after disable_cb
  * @_vq: the struct virtqueue we're talking about.
  *
  * This re-enables callbacks; it returns current queue state
@@ -2365,7 +2236,7 @@  EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
  */
-unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+static unsigned int vring_virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2375,10 +2246,9 @@  unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
 				 virtqueue_enable_cb_prepare_split(_vq);
 }
-EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
 
 /**
- * virtqueue_poll - query pending used buffers
+ * vring_virtqueue_poll - query pending used buffers
  * @_vq: the struct virtqueue we're talking about.
  * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
  *
@@ -2386,7 +2256,7 @@  EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
  *
  * This does not need to be serialized.
  */
-bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
+static bool vring_virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2397,29 +2267,9 @@  bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
 	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
 				 virtqueue_poll_split(_vq, last_used_idx);
 }
-EXPORT_SYMBOL_GPL(virtqueue_poll);
 
 /**
- * virtqueue_enable_cb - restart callbacks after disable_cb.
- * @_vq: the struct virtqueue we're talking about.
- *
- * This re-enables callbacks; it returns "false" if there are pending
- * buffers in the queue, to detect a possible race between the driver
- * checking for more work, and enabling callbacks.
- *
- * Caller must ensure we don't call this with other virtqueue
- * operations at the same time (except where noted).
- */
-bool virtqueue_enable_cb(struct virtqueue *_vq)
-{
-	unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);
-
-	return !virtqueue_poll(_vq, last_used_idx);
-}
-EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
-
-/**
- * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
+ * vring_virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
  * @_vq: the struct virtqueue we're talking about.
  *
  * This re-enables callbacks but hints to the other side to delay
@@ -2431,7 +2281,7 @@  EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
  */
-bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+static bool vring_virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2441,24 +2291,22 @@  bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
 				 virtqueue_enable_cb_delayed_split(_vq);
 }
-EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
 
 /**
- * virtqueue_detach_unused_buf - detach first unused buffer
+ * vring_virtqueue_detach_unused_buf - detach first unused buffer
  * @_vq: the struct virtqueue we're talking about.
  *
  * Returns NULL or the "data" token handed to virtqueue_add_*().
  * This is not valid on an active queue; it is useful for device
  * shutdown or the reset queue.
  */
-void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
+static void *vring_virtqueue_detach_unused_buf(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
 	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
 				 virtqueue_detach_unused_buf_split(_vq);
 }
-EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
 
 static inline bool more_used(const struct vring_virtqueue *vq)
 {
@@ -2531,6 +2379,7 @@  static struct virtqueue *__vring_new_virtqueue(unsigned int index,
 	vq->vq.name = name;
 	vq->vq.index = index;
 	vq->vq.reset = false;
+	vq->vq.ops = &vring_ops;
 	vq->we_own_ring = false;
 	vq->notify = notify;
 	vq->weak_barriers = weak_barriers;
@@ -2616,7 +2465,7 @@  struct virtqueue *vring_create_virtqueue_dma(
 EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
 
 /**
- * virtqueue_resize - resize the vring of vq
+ * vring_virtqueue_resize - resize the vring of vq
  * @_vq: the struct virtqueue we're talking about.
  * @num: new ring num
  * @recycle: callback for recycle the useless buffer
@@ -2639,8 +2488,8 @@  EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
  * -EPERM: Operation not permitted
  *
  */
-int virtqueue_resize(struct virtqueue *_vq, u32 num,
-		     void (*recycle)(struct virtqueue *vq, void *buf))
+static int vring_virtqueue_resize(struct virtqueue *_vq, u32 num,
+				  void (*recycle)(struct virtqueue *vq, void *buf))
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	struct virtio_device *vdev = vq->vq.vdev;
@@ -2669,7 +2518,7 @@  int virtqueue_resize(struct virtqueue *_vq, u32 num,
 	if (err)
 		return err;
 
-	while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
+	while ((buf = vring_virtqueue_detach_unused_buf(_vq)) != NULL)
 		recycle(_vq, buf);
 
 	if (vq->packed_ring)
@@ -2682,7 +2531,6 @@  int virtqueue_resize(struct virtqueue *_vq, u32 num,
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(virtqueue_resize);
 
 /* Only available for split ring */
 struct virtqueue *vring_new_virtqueue(unsigned int index,
@@ -2809,20 +2657,19 @@  void vring_transport_features(struct virtio_device *vdev)
 EXPORT_SYMBOL_GPL(vring_transport_features);
 
 /**
- * virtqueue_get_vring_size - return the size of the virtqueue's vring
+ * vring_virtqueue_get_vring_size - return the size of the virtqueue's vring
  * @_vq: the struct virtqueue containing the vring of interest.
  *
  * Returns the size of the vring.  This is mainly used for boasting to
  * userspace.  Unlike other operations, this need not be serialized.
  */
-unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
+static unsigned int vring_virtqueue_get_vring_size(const struct virtqueue *_vq)
 {
 
 	const struct vring_virtqueue *vq = to_vvq(_vq);
 
 	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
 }
-EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
 
 /*
  * This function should only be called by the core, not directly by the driver.
@@ -2848,54 +2695,29 @@  void __virtqueue_unbreak(struct virtqueue *_vq)
 }
 EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
 
-bool virtqueue_is_broken(const struct virtqueue *_vq)
+static bool vring_virtqueue_is_broken(const struct virtqueue *_vq)
 {
 	const struct vring_virtqueue *vq = to_vvq(_vq);
 
 	return READ_ONCE(vq->broken);
 }
-EXPORT_SYMBOL_GPL(virtqueue_is_broken);
-
-/*
- * This should prevent the device from being used, allowing drivers to
- * recover.  You may need to grab appropriate locks to flush.
- */
-void virtio_break_device(struct virtio_device *dev)
-{
-	struct virtqueue *_vq;
-
-	spin_lock(&dev->vqs_list_lock);
-	list_for_each_entry(_vq, &dev->vqs, list) {
-		struct vring_virtqueue *vq = to_vvq(_vq);
-
-		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
-		WRITE_ONCE(vq->broken, true);
-	}
-	spin_unlock(&dev->vqs_list_lock);
-}
-EXPORT_SYMBOL_GPL(virtio_break_device);
-
-/*
- * This should allow the device to be used by the driver. You may
- * need to grab appropriate locks to flush the write to
- * vq->broken. This should only be used in some specific case e.g
- * (probing and restoring). This function should only be called by the
- * core, not directly by the driver.
- */
-void __virtio_unbreak_device(struct virtio_device *dev)
-{
-	struct virtqueue *_vq;
-
-	spin_lock(&dev->vqs_list_lock);
-	list_for_each_entry(_vq, &dev->vqs, list) {
-		struct vring_virtqueue *vq = to_vvq(_vq);
 
-		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
-		WRITE_ONCE(vq->broken, false);
-	}
-	spin_unlock(&dev->vqs_list_lock);
-}
-EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
+static struct virtqueue_ops vring_ops = {
+	.add_sgs = vring_virtqueue_add_sgs,
+	.kick_prepare = vring_virtqueue_kick_prepare,
+	.notify = vring_virtqueue_notify,
+	.enable_cb_prepare = vring_virtqueue_enable_cb_prepare,
+	.enable_cb_delayed = vring_virtqueue_enable_cb_delayed,
+	.disable_cb = vring_virtqueue_disable_cb,
+	.poll = vring_virtqueue_poll,
+	.get_buf_ctx = vring_virtqueue_get_buf_ctx,
+	.detach_unused_buf = vring_virtqueue_detach_unused_buf,
+	.get_vring_size = vring_virtqueue_get_vring_size,
+	.resize = vring_virtqueue_resize,
+	.__break = __virtqueue_break,
+	.__unbreak = __virtqueue_unbreak,
+	.is_broken = vring_virtqueue_is_broken,
+};
 
 dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
 {
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index b93238db94e3..845858b8761e 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -10,6 +10,34 @@ 
 #include <linux/mod_devicetable.h>
 #include <linux/gfp.h>
 
+struct virtqueue;
+
+/**
+ * struct virtqueue_ops - abstract operations for a virtqueue.
+ *
+ * For a description of each field, see the comments on the corresponding
+ * virtqueue_xxx() wrappers in virtio.c.
+ */
+struct virtqueue_ops {
+	int (*add_sgs)(struct virtqueue *vq, struct scatterlist *sgs[],
+		       unsigned int total_sg,
+		       unsigned int out_sgs, unsigned int in_sgs,
+		       void *data, void *ctx, gfp_t gfp);
+	bool (*kick_prepare)(struct virtqueue *vq);
+	bool (*notify)(struct virtqueue *vq);
+	unsigned int (*enable_cb_prepare)(struct virtqueue *vq);
+	bool (*enable_cb_delayed)(struct virtqueue *vq);
+	void (*disable_cb)(struct virtqueue *vq);
+	bool (*poll)(struct virtqueue *vq, unsigned int idx);
+	void *(*get_buf_ctx)(struct virtqueue *vq, unsigned int *len, void **ctx);
+	void *(*detach_unused_buf)(struct virtqueue *vq);
+	unsigned int (*get_vring_size)(const struct virtqueue *vq);
+	int (*resize)(struct virtqueue *vq, u32 num,
+		      void (*recycle)(struct virtqueue *vq, void *buf));
+	void (*__break)(struct virtqueue *vq);
+	void (*__unbreak)(struct virtqueue *vq);
+	bool (*is_broken)(const struct virtqueue *vq);
+};
+
 /**
  * struct virtqueue - a queue to register buffers for sending or receiving.
  * @list: the chain of virtqueues for this device
@@ -36,6 +64,7 @@  struct virtqueue {
 	unsigned int num_max;
 	bool reset;
 	void *priv;
+	struct virtqueue_ops *ops;
 };
 
 int virtqueue_add_outbuf(struct virtqueue *vq,