[v2,1/2] virtio: abstract virtqueue related methods

Message ID 20230517025424.601141-2-pizhenwei@bytedance.com
State New
Headers
Series virtio: abstract virtqueue related methods |

Commit Message

zhenwei pi May 17, 2023, 2:54 a.m. UTC
  There is already a virtqueue abstract structure in the virtio subsystem
(see struct virtqueue in include/linux/virtio.h); however, only the vring
based virtqueue has been used in the past years, and the virtqueue related
methods are heavily mixed with vring (as the code shows, virtqueue_xxx
functions are implemented in virtio_ring.c).

Abstract the virtqueue related methods (see struct virtqueue_ops), and
separate virtqueue_xxx symbols from vring. This allows a non-vring
based transport in the future. See changes in virtio.h.

All the vring based virtqueue methods could be abstracted in theory;
MST suggested that the add/get buf and kick functions are quite
performance sensitive, so these functions are still exported from
virtio_ring.ko and drivers call them directly in the fast path.

Cc: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
---
 drivers/virtio/virtio_ring.c | 285 +++++-----------------
 include/linux/virtio.h       | 441 +++++++++++++++++++++++++++++++----
 include/linux/virtio_ring.h  |  26 +++
 3 files changed, 483 insertions(+), 269 deletions(-)
  

Comments

kernel test robot May 17, 2023, 6:57 a.m. UTC | #1
Hi zhenwei,

kernel test robot noticed the following build warnings:

[auto build test WARNING on mst-vhost/linux-next]
[also build test WARNING on linus/master v6.4-rc2 next-20230517]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/zhenwei-pi/virtio-abstract-virtqueue-related-methods/20230517-110311
base:   https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git linux-next
patch link:    https://lore.kernel.org/r/20230517025424.601141-2-pizhenwei%40bytedance.com
patch subject: [PATCH v2 1/2] virtio: abstract virtqueue related methods
config: alpha-randconfig-r003-20230517
compiler: alpha-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/146086b281eebe5c5368c387f96a0395c6252d41
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review zhenwei-pi/virtio-abstract-virtqueue-related-methods/20230517-110311
        git checkout 146086b281eebe5c5368c387f96a0395c6252d41
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=alpha olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=alpha SHELL=/bin/bash drivers/virtio/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202305171458.bzUep6rL-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> drivers/virtio/virtio_ring.c:2310: warning: expecting prototype for virtqueue_detach_unused_buf(). Prototype was for vring_virtqueue_detach_unused_buf() instead
>> drivers/virtio/virtio_ring.c:2499: warning: expecting prototype for virtqueue_resize(). Prototype was for vring_virtqueue_resize() instead
>> drivers/virtio/virtio_ring.c:2673: warning: expecting prototype for virtqueue_get_vring_size(). Prototype was for vring_virtqueue_get_vring_size() instead


vim +2310 drivers/virtio/virtio_ring.c

e6f633e5beab65 Tiwei Bie          2018-11-21  2300  
138fd25148638a Tiwei Bie          2018-11-21  2301  /**
138fd25148638a Tiwei Bie          2018-11-21  2302   * virtqueue_detach_unused_buf - detach first unused buffer
a5581206c565a7 Jiang Biao         2019-04-23  2303   * @_vq: the struct virtqueue we're talking about.
138fd25148638a Tiwei Bie          2018-11-21  2304   *
138fd25148638a Tiwei Bie          2018-11-21  2305   * Returns NULL or the "data" token handed to virtqueue_add_*().
a62eecb3a9c086 Xuan Zhuo          2022-08-01  2306   * This is not valid on an active queue; it is useful for device
a62eecb3a9c086 Xuan Zhuo          2022-08-01  2307   * shutdown or the reset queue.
138fd25148638a Tiwei Bie          2018-11-21  2308   */
146086b281eebe zhenwei pi         2023-05-17  2309  static void *vring_virtqueue_detach_unused_buf(struct virtqueue *_vq)
138fd25148638a Tiwei Bie          2018-11-21 @2310  {
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2311  	struct vring_virtqueue *vq = to_vvq(_vq);
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2312  
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2313  	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2314  				 virtqueue_detach_unused_buf_split(_vq);
138fd25148638a Tiwei Bie          2018-11-21  2315  }
c021eac4148c16 Shirley Ma         2010-01-18  2316  
138fd25148638a Tiwei Bie          2018-11-21  2317  static inline bool more_used(const struct vring_virtqueue *vq)
138fd25148638a Tiwei Bie          2018-11-21  2318  {
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2319  	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
138fd25148638a Tiwei Bie          2018-11-21  2320  }
138fd25148638a Tiwei Bie          2018-11-21  2321  
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2322  /**
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2323   * vring_interrupt - notify a virtqueue on an interrupt
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2324   * @irq: the IRQ number (ignored)
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2325   * @_vq: the struct virtqueue to notify
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2326   *
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2327   * Calls the callback function of @_vq to process the virtqueue
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2328   * notification.
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2329   */
0a8a69dd77ddbd Rusty Russell      2007-10-22  2330  irqreturn_t vring_interrupt(int irq, void *_vq)
0a8a69dd77ddbd Rusty Russell      2007-10-22  2331  {
0a8a69dd77ddbd Rusty Russell      2007-10-22  2332  	struct vring_virtqueue *vq = to_vvq(_vq);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2333  
0a8a69dd77ddbd Rusty Russell      2007-10-22  2334  	if (!more_used(vq)) {
0a8a69dd77ddbd Rusty Russell      2007-10-22  2335  		pr_debug("virtqueue interrupt with no work for %p\n", vq);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2336  		return IRQ_NONE;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2337  	}
0a8a69dd77ddbd Rusty Russell      2007-10-22  2338  
8b4ec69d7e098a Jason Wang         2022-05-27  2339  	if (unlikely(vq->broken)) {
c346dae4f3fbce Jason Wang         2022-06-22  2340  #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
8b4ec69d7e098a Jason Wang         2022-05-27  2341  		dev_warn_once(&vq->vq.vdev->dev,
8b4ec69d7e098a Jason Wang         2022-05-27  2342  			      "virtio vring IRQ raised before DRIVER_OK");
8b4ec69d7e098a Jason Wang         2022-05-27  2343  		return IRQ_NONE;
c346dae4f3fbce Jason Wang         2022-06-22  2344  #else
c346dae4f3fbce Jason Wang         2022-06-22  2345  		return IRQ_HANDLED;
c346dae4f3fbce Jason Wang         2022-06-22  2346  #endif
8b4ec69d7e098a Jason Wang         2022-05-27  2347  	}
0a8a69dd77ddbd Rusty Russell      2007-10-22  2348  
8d622d21d24803 Michael S. Tsirkin 2021-04-13  2349  	/* Just a hint for performance: so it's ok that this can be racy! */
8d622d21d24803 Michael S. Tsirkin 2021-04-13  2350  	if (vq->event)
8d622d21d24803 Michael S. Tsirkin 2021-04-13  2351  		vq->event_triggered = true;
8d622d21d24803 Michael S. Tsirkin 2021-04-13  2352  
0a8a69dd77ddbd Rusty Russell      2007-10-22  2353  	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
18445c4d501b9a Rusty Russell      2008-02-04  2354  	if (vq->vq.callback)
18445c4d501b9a Rusty Russell      2008-02-04  2355  		vq->vq.callback(&vq->vq);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2356  
0a8a69dd77ddbd Rusty Russell      2007-10-22  2357  	return IRQ_HANDLED;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2358  }
c6fd47011b4bde Rusty Russell      2008-02-04  2359  EXPORT_SYMBOL_GPL(vring_interrupt);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2360  
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2361  /* Only available for split ring */
07d9629d49584b Xuan Zhuo          2022-08-01  2362  static struct virtqueue *__vring_new_virtqueue(unsigned int index,
cd4c812acb8390 Xuan Zhuo          2022-08-01  2363  					       struct vring_virtqueue_split *vring_split,
0a8a69dd77ddbd Rusty Russell      2007-10-22  2364  					       struct virtio_device *vdev,
7b21e34fd1c272 Rusty Russell      2012-01-12  2365  					       bool weak_barriers,
f94682dde5ed23 Michael S. Tsirkin 2017-03-06  2366  					       bool context,
46f9c2b925ac12 Heinz Graalfs      2013-10-29  2367  					       bool (*notify)(struct virtqueue *),
9499f5e7ed5224 Rusty Russell      2009-06-12  2368  					       void (*callback)(struct virtqueue *),
2713ea3c7d930a Jason Wang         2023-01-19  2369  					       const char *name,
2713ea3c7d930a Jason Wang         2023-01-19  2370  					       struct device *dma_dev)
0a8a69dd77ddbd Rusty Russell      2007-10-22  2371  {
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2372  	struct vring_virtqueue *vq;
a2b36c8d7ddbaf Xuan Zhuo          2022-08-01  2373  	int err;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2374  
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2375  	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2376  		return NULL;
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2377  
cbeedb72b97ad8 Tiwei Bie          2018-11-21  2378  	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2379  	if (!vq)
0a8a69dd77ddbd Rusty Russell      2007-10-22  2380  		return NULL;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2381  
146086b281eebe zhenwei pi         2023-05-17  2382  	vring_virtqueue_set_ops(&vq->vq);
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2383  	vq->packed_ring = false;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2384  	vq->vq.callback = callback;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2385  	vq->vq.vdev = vdev;
9499f5e7ed5224 Rusty Russell      2009-06-12  2386  	vq->vq.name = name;
06ca287dbac9cc Rusty Russell      2012-10-16  2387  	vq->vq.index = index;
4913e85441b403 Xuan Zhuo          2022-08-01  2388  	vq->vq.reset = false;
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2389  	vq->we_own_ring = false;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2390  	vq->notify = notify;
7b21e34fd1c272 Rusty Russell      2012-01-12  2391  	vq->weak_barriers = weak_barriers;
c346dae4f3fbce Jason Wang         2022-06-22  2392  #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
8b4ec69d7e098a Jason Wang         2022-05-27  2393  	vq->broken = true;
c346dae4f3fbce Jason Wang         2022-06-22  2394  #else
c346dae4f3fbce Jason Wang         2022-06-22  2395  	vq->broken = false;
c346dae4f3fbce Jason Wang         2022-06-22  2396  #endif
2713ea3c7d930a Jason Wang         2023-01-19  2397  	vq->dma_dev = dma_dev;
fb3fba6b162aaa Tiwei Bie          2018-11-21  2398  	vq->use_dma_api = vring_use_dma_api(vdev);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2399  
5a08b04f637921 Michael S. Tsirkin 2017-02-07  2400  	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
5a08b04f637921 Michael S. Tsirkin 2017-02-07  2401  		!context;
a5c262c5fd83ec Michael S. Tsirkin 2011-05-20  2402  	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
9fa29b9df32ba4 Mark McLoughlin    2009-05-11  2403  
45383fb0f42db3 Tiwei Bie          2019-01-23  2404  	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
45383fb0f42db3 Tiwei Bie          2019-01-23  2405  		vq->weak_barriers = false;
45383fb0f42db3 Tiwei Bie          2019-01-23  2406  
a2b36c8d7ddbaf Xuan Zhuo          2022-08-01  2407  	err = vring_alloc_state_extra_split(vring_split);
a2b36c8d7ddbaf Xuan Zhuo          2022-08-01  2408  	if (err) {
a2b36c8d7ddbaf Xuan Zhuo          2022-08-01  2409  		kfree(vq);
a2b36c8d7ddbaf Xuan Zhuo          2022-08-01  2410  		return NULL;
a2b36c8d7ddbaf Xuan Zhuo          2022-08-01  2411  	}
72b5e8958738aa Jason Wang         2021-06-04  2412  
198fa7be96e52b Xuan Zhuo          2022-08-01  2413  	virtqueue_vring_init_split(vring_split, vq);
198fa7be96e52b Xuan Zhuo          2022-08-01  2414  
cd4c812acb8390 Xuan Zhuo          2022-08-01  2415  	virtqueue_init(vq, vring_split->vring.num);
e1d6a423ea1867 Xuan Zhuo          2022-08-01  2416  	virtqueue_vring_attach_split(vq, vring_split);
3a897128d31934 Xuan Zhuo          2022-08-01  2417  
0e566c8f0f2e83 Parav Pandit       2021-07-21  2418  	spin_lock(&vdev->vqs_list_lock);
e152d8af4220a0 Dan Carpenter      2020-12-04  2419  	list_add_tail(&vq->vq.list, &vdev->vqs);
0e566c8f0f2e83 Parav Pandit       2021-07-21  2420  	spin_unlock(&vdev->vqs_list_lock);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2421  	return &vq->vq;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2422  }
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2423  
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2424  struct virtqueue *vring_create_virtqueue(
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2425  	unsigned int index,
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2426  	unsigned int num,
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2427  	unsigned int vring_align,
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2428  	struct virtio_device *vdev,
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2429  	bool weak_barriers,
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2430  	bool may_reduce_num,
f94682dde5ed23 Michael S. Tsirkin 2017-03-06  2431  	bool context,
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2432  	bool (*notify)(struct virtqueue *),
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2433  	void (*callback)(struct virtqueue *),
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2434  	const char *name)
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2435  {
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2436  
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2437  	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2438  		return vring_create_virtqueue_packed(index, num, vring_align,
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2439  				vdev, weak_barriers, may_reduce_num,
2713ea3c7d930a Jason Wang         2023-01-19  2440  				context, notify, callback, name, vdev->dev.parent);
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2441  
d79dca75c79680 Tiwei Bie          2018-11-21  2442  	return vring_create_virtqueue_split(index, num, vring_align,
d79dca75c79680 Tiwei Bie          2018-11-21  2443  			vdev, weak_barriers, may_reduce_num,
2713ea3c7d930a Jason Wang         2023-01-19  2444  			context, notify, callback, name, vdev->dev.parent);
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2445  }
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2446  EXPORT_SYMBOL_GPL(vring_create_virtqueue);
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2447  
2713ea3c7d930a Jason Wang         2023-01-19  2448  struct virtqueue *vring_create_virtqueue_dma(
2713ea3c7d930a Jason Wang         2023-01-19  2449  	unsigned int index,
2713ea3c7d930a Jason Wang         2023-01-19  2450  	unsigned int num,
2713ea3c7d930a Jason Wang         2023-01-19  2451  	unsigned int vring_align,
2713ea3c7d930a Jason Wang         2023-01-19  2452  	struct virtio_device *vdev,
2713ea3c7d930a Jason Wang         2023-01-19  2453  	bool weak_barriers,
2713ea3c7d930a Jason Wang         2023-01-19  2454  	bool may_reduce_num,
2713ea3c7d930a Jason Wang         2023-01-19  2455  	bool context,
2713ea3c7d930a Jason Wang         2023-01-19  2456  	bool (*notify)(struct virtqueue *),
2713ea3c7d930a Jason Wang         2023-01-19  2457  	void (*callback)(struct virtqueue *),
2713ea3c7d930a Jason Wang         2023-01-19  2458  	const char *name,
2713ea3c7d930a Jason Wang         2023-01-19  2459  	struct device *dma_dev)
2713ea3c7d930a Jason Wang         2023-01-19  2460  {
2713ea3c7d930a Jason Wang         2023-01-19  2461  
2713ea3c7d930a Jason Wang         2023-01-19  2462  	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2713ea3c7d930a Jason Wang         2023-01-19  2463  		return vring_create_virtqueue_packed(index, num, vring_align,
2713ea3c7d930a Jason Wang         2023-01-19  2464  				vdev, weak_barriers, may_reduce_num,
2713ea3c7d930a Jason Wang         2023-01-19  2465  				context, notify, callback, name, dma_dev);
2713ea3c7d930a Jason Wang         2023-01-19  2466  
2713ea3c7d930a Jason Wang         2023-01-19  2467  	return vring_create_virtqueue_split(index, num, vring_align,
2713ea3c7d930a Jason Wang         2023-01-19  2468  			vdev, weak_barriers, may_reduce_num,
2713ea3c7d930a Jason Wang         2023-01-19  2469  			context, notify, callback, name, dma_dev);
2713ea3c7d930a Jason Wang         2023-01-19  2470  }
2713ea3c7d930a Jason Wang         2023-01-19  2471  EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
2713ea3c7d930a Jason Wang         2023-01-19  2472  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2473  /**
c790e8e1817f1a Xuan Zhuo          2022-08-01  2474   * virtqueue_resize - resize the vring of vq
c790e8e1817f1a Xuan Zhuo          2022-08-01  2475   * @_vq: the struct virtqueue we're talking about.
c790e8e1817f1a Xuan Zhuo          2022-08-01  2476   * @num: new ring num
c790e8e1817f1a Xuan Zhuo          2022-08-01  2477   * @recycle: callback for recycle the useless buffer
c790e8e1817f1a Xuan Zhuo          2022-08-01  2478   *
c790e8e1817f1a Xuan Zhuo          2022-08-01  2479   * When it is really necessary to create a new vring, it will set the current vq
c790e8e1817f1a Xuan Zhuo          2022-08-01  2480   * into the reset state. Then call the passed callback to recycle the buffer
c790e8e1817f1a Xuan Zhuo          2022-08-01  2481   * that is no longer used. Only after the new vring is successfully created, the
c790e8e1817f1a Xuan Zhuo          2022-08-01  2482   * old vring will be released.
c790e8e1817f1a Xuan Zhuo          2022-08-01  2483   *
c790e8e1817f1a Xuan Zhuo          2022-08-01  2484   * Caller must ensure we don't call this with other virtqueue operations
c790e8e1817f1a Xuan Zhuo          2022-08-01  2485   * at the same time (except where noted).
c790e8e1817f1a Xuan Zhuo          2022-08-01  2486   *
c790e8e1817f1a Xuan Zhuo          2022-08-01  2487   * Returns zero or a negative error.
c790e8e1817f1a Xuan Zhuo          2022-08-01  2488   * 0: success.
c790e8e1817f1a Xuan Zhuo          2022-08-01  2489   * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size.
c790e8e1817f1a Xuan Zhuo          2022-08-01  2490   *  vq can still work normally
c790e8e1817f1a Xuan Zhuo          2022-08-01  2491   * -EBUSY: Failed to sync with device, vq may not work properly
c790e8e1817f1a Xuan Zhuo          2022-08-01  2492   * -ENOENT: Transport or device not supported
c790e8e1817f1a Xuan Zhuo          2022-08-01  2493   * -E2BIG/-EINVAL: num error
c790e8e1817f1a Xuan Zhuo          2022-08-01  2494   * -EPERM: Operation not permitted
c790e8e1817f1a Xuan Zhuo          2022-08-01  2495   *
c790e8e1817f1a Xuan Zhuo          2022-08-01  2496   */
146086b281eebe zhenwei pi         2023-05-17  2497  static int vring_virtqueue_resize(struct virtqueue *_vq, u32 num,
c790e8e1817f1a Xuan Zhuo          2022-08-01  2498  				  void (*recycle)(struct virtqueue *vq, void *buf))
c790e8e1817f1a Xuan Zhuo          2022-08-01 @2499  {
c790e8e1817f1a Xuan Zhuo          2022-08-01  2500  	struct vring_virtqueue *vq = to_vvq(_vq);
c790e8e1817f1a Xuan Zhuo          2022-08-01  2501  	struct virtio_device *vdev = vq->vq.vdev;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2502  	void *buf;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2503  	int err;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2504  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2505  	if (!vq->we_own_ring)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2506  		return -EPERM;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2507  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2508  	if (num > vq->vq.num_max)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2509  		return -E2BIG;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2510  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2511  	if (!num)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2512  		return -EINVAL;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2513  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2514  	if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2515  		return 0;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2516  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2517  	if (!vdev->config->disable_vq_and_reset)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2518  		return -ENOENT;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2519  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2520  	if (!vdev->config->enable_vq_after_reset)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2521  		return -ENOENT;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2522  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2523  	err = vdev->config->disable_vq_and_reset(_vq);
c790e8e1817f1a Xuan Zhuo          2022-08-01  2524  	if (err)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2525  		return err;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2526  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2527  	while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2528  		recycle(_vq, buf);
c790e8e1817f1a Xuan Zhuo          2022-08-01  2529  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2530  	if (vq->packed_ring)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2531  		err = virtqueue_resize_packed(_vq, num);
c790e8e1817f1a Xuan Zhuo          2022-08-01  2532  	else
c790e8e1817f1a Xuan Zhuo          2022-08-01  2533  		err = virtqueue_resize_split(_vq, num);
c790e8e1817f1a Xuan Zhuo          2022-08-01  2534  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2535  	if (vdev->config->enable_vq_after_reset(_vq))
c790e8e1817f1a Xuan Zhuo          2022-08-01  2536  		return -EBUSY;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2537  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2538  	return err;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2539  }
c790e8e1817f1a Xuan Zhuo          2022-08-01  2540
  
kernel test robot May 17, 2023, 7:10 a.m. UTC | #2
Hi zhenwei,

kernel test robot noticed the following build warnings:

[auto build test WARNING on mst-vhost/linux-next]
[also build test WARNING on linus/master v6.4-rc2 next-20230517]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/zhenwei-pi/virtio-abstract-virtqueue-related-methods/20230517-110311
base:   https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git linux-next
patch link:    https://lore.kernel.org/r/20230517025424.601141-2-pizhenwei%40bytedance.com
patch subject: [PATCH v2 1/2] virtio: abstract virtqueue related methods
config: x86_64-defconfig
compiler: gcc-11 (Debian 11.3.0-12) 11.3.0
reproduce (this is a W=1 build):
        # https://github.com/intel-lab-lkp/linux/commit/146086b281eebe5c5368c387f96a0395c6252d41
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review zhenwei-pi/virtio-abstract-virtqueue-related-methods/20230517-110311
        git checkout 146086b281eebe5c5368c387f96a0395c6252d41
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        make W=1 O=build_dir ARCH=x86_64 olddefconfig
        make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash drivers/virtio/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202305171421.VtD4kp78-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> drivers/virtio/virtio_ring.c:2310: warning: expecting prototype for virtqueue_detach_unused_buf(). Prototype was for vring_virtqueue_detach_unused_buf() instead
>> drivers/virtio/virtio_ring.c:2499: warning: expecting prototype for virtqueue_resize(). Prototype was for vring_virtqueue_resize() instead
>> drivers/virtio/virtio_ring.c:2673: warning: expecting prototype for virtqueue_get_vring_size(). Prototype was for vring_virtqueue_get_vring_size() instead


vim +2310 drivers/virtio/virtio_ring.c

e6f633e5beab65 Tiwei Bie          2018-11-21  2300  
138fd25148638a Tiwei Bie          2018-11-21  2301  /**
138fd25148638a Tiwei Bie          2018-11-21  2302   * virtqueue_detach_unused_buf - detach first unused buffer
a5581206c565a7 Jiang Biao         2019-04-23  2303   * @_vq: the struct virtqueue we're talking about.
138fd25148638a Tiwei Bie          2018-11-21  2304   *
138fd25148638a Tiwei Bie          2018-11-21  2305   * Returns NULL or the "data" token handed to virtqueue_add_*().
a62eecb3a9c086 Xuan Zhuo          2022-08-01  2306   * This is not valid on an active queue; it is useful for device
a62eecb3a9c086 Xuan Zhuo          2022-08-01  2307   * shutdown or the reset queue.
138fd25148638a Tiwei Bie          2018-11-21  2308   */
146086b281eebe zhenwei pi         2023-05-17  2309  static void *vring_virtqueue_detach_unused_buf(struct virtqueue *_vq)
138fd25148638a Tiwei Bie          2018-11-21 @2310  {
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2311  	struct vring_virtqueue *vq = to_vvq(_vq);
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2312  
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2313  	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2314  				 virtqueue_detach_unused_buf_split(_vq);
138fd25148638a Tiwei Bie          2018-11-21  2315  }
c021eac4148c16 Shirley Ma         2010-01-18  2316  
138fd25148638a Tiwei Bie          2018-11-21  2317  static inline bool more_used(const struct vring_virtqueue *vq)
138fd25148638a Tiwei Bie          2018-11-21  2318  {
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2319  	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
138fd25148638a Tiwei Bie          2018-11-21  2320  }
138fd25148638a Tiwei Bie          2018-11-21  2321  
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2322  /**
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2323   * vring_interrupt - notify a virtqueue on an interrupt
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2324   * @irq: the IRQ number (ignored)
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2325   * @_vq: the struct virtqueue to notify
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2326   *
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2327   * Calls the callback function of @_vq to process the virtqueue
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2328   * notification.
5c669c4a4c6aa0 Ricardo Cañuelo    2022-08-10  2329   */
0a8a69dd77ddbd Rusty Russell      2007-10-22  2330  irqreturn_t vring_interrupt(int irq, void *_vq)
0a8a69dd77ddbd Rusty Russell      2007-10-22  2331  {
0a8a69dd77ddbd Rusty Russell      2007-10-22  2332  	struct vring_virtqueue *vq = to_vvq(_vq);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2333  
0a8a69dd77ddbd Rusty Russell      2007-10-22  2334  	if (!more_used(vq)) {
0a8a69dd77ddbd Rusty Russell      2007-10-22  2335  		pr_debug("virtqueue interrupt with no work for %p\n", vq);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2336  		return IRQ_NONE;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2337  	}
0a8a69dd77ddbd Rusty Russell      2007-10-22  2338  
8b4ec69d7e098a Jason Wang         2022-05-27  2339  	if (unlikely(vq->broken)) {
c346dae4f3fbce Jason Wang         2022-06-22  2340  #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
8b4ec69d7e098a Jason Wang         2022-05-27  2341  		dev_warn_once(&vq->vq.vdev->dev,
8b4ec69d7e098a Jason Wang         2022-05-27  2342  			      "virtio vring IRQ raised before DRIVER_OK");
8b4ec69d7e098a Jason Wang         2022-05-27  2343  		return IRQ_NONE;
c346dae4f3fbce Jason Wang         2022-06-22  2344  #else
c346dae4f3fbce Jason Wang         2022-06-22  2345  		return IRQ_HANDLED;
c346dae4f3fbce Jason Wang         2022-06-22  2346  #endif
8b4ec69d7e098a Jason Wang         2022-05-27  2347  	}
0a8a69dd77ddbd Rusty Russell      2007-10-22  2348  
8d622d21d24803 Michael S. Tsirkin 2021-04-13  2349  	/* Just a hint for performance: so it's ok that this can be racy! */
8d622d21d24803 Michael S. Tsirkin 2021-04-13  2350  	if (vq->event)
8d622d21d24803 Michael S. Tsirkin 2021-04-13  2351  		vq->event_triggered = true;
8d622d21d24803 Michael S. Tsirkin 2021-04-13  2352  
0a8a69dd77ddbd Rusty Russell      2007-10-22  2353  	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
18445c4d501b9a Rusty Russell      2008-02-04  2354  	if (vq->vq.callback)
18445c4d501b9a Rusty Russell      2008-02-04  2355  		vq->vq.callback(&vq->vq);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2356  
0a8a69dd77ddbd Rusty Russell      2007-10-22  2357  	return IRQ_HANDLED;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2358  }
c6fd47011b4bde Rusty Russell      2008-02-04  2359  EXPORT_SYMBOL_GPL(vring_interrupt);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2360  
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2361  /* Only available for split ring */
07d9629d49584b Xuan Zhuo          2022-08-01  2362  static struct virtqueue *__vring_new_virtqueue(unsigned int index,
cd4c812acb8390 Xuan Zhuo          2022-08-01  2363  					       struct vring_virtqueue_split *vring_split,
0a8a69dd77ddbd Rusty Russell      2007-10-22  2364  					       struct virtio_device *vdev,
7b21e34fd1c272 Rusty Russell      2012-01-12  2365  					       bool weak_barriers,
f94682dde5ed23 Michael S. Tsirkin 2017-03-06  2366  					       bool context,
46f9c2b925ac12 Heinz Graalfs      2013-10-29  2367  					       bool (*notify)(struct virtqueue *),
9499f5e7ed5224 Rusty Russell      2009-06-12  2368  					       void (*callback)(struct virtqueue *),
2713ea3c7d930a Jason Wang         2023-01-19  2369  					       const char *name,
2713ea3c7d930a Jason Wang         2023-01-19  2370  					       struct device *dma_dev)
0a8a69dd77ddbd Rusty Russell      2007-10-22  2371  {
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2372  	struct vring_virtqueue *vq;
a2b36c8d7ddbaf Xuan Zhuo          2022-08-01  2373  	int err;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2374  
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2375  	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2376  		return NULL;
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2377  
cbeedb72b97ad8 Tiwei Bie          2018-11-21  2378  	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2379  	if (!vq)
0a8a69dd77ddbd Rusty Russell      2007-10-22  2380  		return NULL;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2381  
146086b281eebe zhenwei pi         2023-05-17  2382  	vring_virtqueue_set_ops(&vq->vq);
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2383  	vq->packed_ring = false;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2384  	vq->vq.callback = callback;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2385  	vq->vq.vdev = vdev;
9499f5e7ed5224 Rusty Russell      2009-06-12  2386  	vq->vq.name = name;
06ca287dbac9cc Rusty Russell      2012-10-16  2387  	vq->vq.index = index;
4913e85441b403 Xuan Zhuo          2022-08-01  2388  	vq->vq.reset = false;
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2389  	vq->we_own_ring = false;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2390  	vq->notify = notify;
7b21e34fd1c272 Rusty Russell      2012-01-12  2391  	vq->weak_barriers = weak_barriers;
c346dae4f3fbce Jason Wang         2022-06-22  2392  #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
8b4ec69d7e098a Jason Wang         2022-05-27  2393  	vq->broken = true;
c346dae4f3fbce Jason Wang         2022-06-22  2394  #else
c346dae4f3fbce Jason Wang         2022-06-22  2395  	vq->broken = false;
c346dae4f3fbce Jason Wang         2022-06-22  2396  #endif
2713ea3c7d930a Jason Wang         2023-01-19  2397  	vq->dma_dev = dma_dev;
fb3fba6b162aaa Tiwei Bie          2018-11-21  2398  	vq->use_dma_api = vring_use_dma_api(vdev);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2399  
5a08b04f637921 Michael S. Tsirkin 2017-02-07  2400  	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
5a08b04f637921 Michael S. Tsirkin 2017-02-07  2401  		!context;
a5c262c5fd83ec Michael S. Tsirkin 2011-05-20  2402  	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
9fa29b9df32ba4 Mark McLoughlin    2009-05-11  2403  
45383fb0f42db3 Tiwei Bie          2019-01-23  2404  	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
45383fb0f42db3 Tiwei Bie          2019-01-23  2405  		vq->weak_barriers = false;
45383fb0f42db3 Tiwei Bie          2019-01-23  2406  
a2b36c8d7ddbaf Xuan Zhuo          2022-08-01  2407  	err = vring_alloc_state_extra_split(vring_split);
a2b36c8d7ddbaf Xuan Zhuo          2022-08-01  2408  	if (err) {
a2b36c8d7ddbaf Xuan Zhuo          2022-08-01  2409  		kfree(vq);
a2b36c8d7ddbaf Xuan Zhuo          2022-08-01  2410  		return NULL;
a2b36c8d7ddbaf Xuan Zhuo          2022-08-01  2411  	}
72b5e8958738aa Jason Wang         2021-06-04  2412  
198fa7be96e52b Xuan Zhuo          2022-08-01  2413  	virtqueue_vring_init_split(vring_split, vq);
198fa7be96e52b Xuan Zhuo          2022-08-01  2414  
cd4c812acb8390 Xuan Zhuo          2022-08-01  2415  	virtqueue_init(vq, vring_split->vring.num);
e1d6a423ea1867 Xuan Zhuo          2022-08-01  2416  	virtqueue_vring_attach_split(vq, vring_split);
3a897128d31934 Xuan Zhuo          2022-08-01  2417  
0e566c8f0f2e83 Parav Pandit       2021-07-21  2418  	spin_lock(&vdev->vqs_list_lock);
e152d8af4220a0 Dan Carpenter      2020-12-04  2419  	list_add_tail(&vq->vq.list, &vdev->vqs);
0e566c8f0f2e83 Parav Pandit       2021-07-21  2420  	spin_unlock(&vdev->vqs_list_lock);
0a8a69dd77ddbd Rusty Russell      2007-10-22  2421  	return &vq->vq;
0a8a69dd77ddbd Rusty Russell      2007-10-22  2422  }
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2423  
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2424  struct virtqueue *vring_create_virtqueue(
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2425  	unsigned int index,
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2426  	unsigned int num,
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2427  	unsigned int vring_align,
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2428  	struct virtio_device *vdev,
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2429  	bool weak_barriers,
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2430  	bool may_reduce_num,
f94682dde5ed23 Michael S. Tsirkin 2017-03-06  2431  	bool context,
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2432  	bool (*notify)(struct virtqueue *),
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2433  	void (*callback)(struct virtqueue *),
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2434  	const char *name)
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2435  {
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2436  
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2437  	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2438  		return vring_create_virtqueue_packed(index, num, vring_align,
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2439  				vdev, weak_barriers, may_reduce_num,
2713ea3c7d930a Jason Wang         2023-01-19  2440  				context, notify, callback, name, vdev->dev.parent);
1ce9e6055fa0a9 Tiwei Bie          2018-11-21  2441  
d79dca75c79680 Tiwei Bie          2018-11-21  2442  	return vring_create_virtqueue_split(index, num, vring_align,
d79dca75c79680 Tiwei Bie          2018-11-21  2443  			vdev, weak_barriers, may_reduce_num,
2713ea3c7d930a Jason Wang         2023-01-19  2444  			context, notify, callback, name, vdev->dev.parent);
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2445  }
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2446  EXPORT_SYMBOL_GPL(vring_create_virtqueue);
2a2d1382fe9dcc Andy Lutomirski    2016-02-02  2447  
2713ea3c7d930a Jason Wang         2023-01-19  2448  struct virtqueue *vring_create_virtqueue_dma(
2713ea3c7d930a Jason Wang         2023-01-19  2449  	unsigned int index,
2713ea3c7d930a Jason Wang         2023-01-19  2450  	unsigned int num,
2713ea3c7d930a Jason Wang         2023-01-19  2451  	unsigned int vring_align,
2713ea3c7d930a Jason Wang         2023-01-19  2452  	struct virtio_device *vdev,
2713ea3c7d930a Jason Wang         2023-01-19  2453  	bool weak_barriers,
2713ea3c7d930a Jason Wang         2023-01-19  2454  	bool may_reduce_num,
2713ea3c7d930a Jason Wang         2023-01-19  2455  	bool context,
2713ea3c7d930a Jason Wang         2023-01-19  2456  	bool (*notify)(struct virtqueue *),
2713ea3c7d930a Jason Wang         2023-01-19  2457  	void (*callback)(struct virtqueue *),
2713ea3c7d930a Jason Wang         2023-01-19  2458  	const char *name,
2713ea3c7d930a Jason Wang         2023-01-19  2459  	struct device *dma_dev)
2713ea3c7d930a Jason Wang         2023-01-19  2460  {
2713ea3c7d930a Jason Wang         2023-01-19  2461  
2713ea3c7d930a Jason Wang         2023-01-19  2462  	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2713ea3c7d930a Jason Wang         2023-01-19  2463  		return vring_create_virtqueue_packed(index, num, vring_align,
2713ea3c7d930a Jason Wang         2023-01-19  2464  				vdev, weak_barriers, may_reduce_num,
2713ea3c7d930a Jason Wang         2023-01-19  2465  				context, notify, callback, name, dma_dev);
2713ea3c7d930a Jason Wang         2023-01-19  2466  
2713ea3c7d930a Jason Wang         2023-01-19  2467  	return vring_create_virtqueue_split(index, num, vring_align,
2713ea3c7d930a Jason Wang         2023-01-19  2468  			vdev, weak_barriers, may_reduce_num,
2713ea3c7d930a Jason Wang         2023-01-19  2469  			context, notify, callback, name, dma_dev);
2713ea3c7d930a Jason Wang         2023-01-19  2470  }
2713ea3c7d930a Jason Wang         2023-01-19  2471  EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
2713ea3c7d930a Jason Wang         2023-01-19  2472  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2473  /**
c790e8e1817f1a Xuan Zhuo          2022-08-01  2474   * virtqueue_resize - resize the vring of vq
c790e8e1817f1a Xuan Zhuo          2022-08-01  2475   * @_vq: the struct virtqueue we're talking about.
c790e8e1817f1a Xuan Zhuo          2022-08-01  2476   * @num: new ring num
c790e8e1817f1a Xuan Zhuo          2022-08-01  2477   * @recycle: callback for recycle the useless buffer
c790e8e1817f1a Xuan Zhuo          2022-08-01  2478   *
c790e8e1817f1a Xuan Zhuo          2022-08-01  2479   * When it is really necessary to create a new vring, it will set the current vq
c790e8e1817f1a Xuan Zhuo          2022-08-01  2480   * into the reset state. Then call the passed callback to recycle the buffer
c790e8e1817f1a Xuan Zhuo          2022-08-01  2481   * that is no longer used. Only after the new vring is successfully created, the
c790e8e1817f1a Xuan Zhuo          2022-08-01  2482   * old vring will be released.
c790e8e1817f1a Xuan Zhuo          2022-08-01  2483   *
c790e8e1817f1a Xuan Zhuo          2022-08-01  2484   * Caller must ensure we don't call this with other virtqueue operations
c790e8e1817f1a Xuan Zhuo          2022-08-01  2485   * at the same time (except where noted).
c790e8e1817f1a Xuan Zhuo          2022-08-01  2486   *
c790e8e1817f1a Xuan Zhuo          2022-08-01  2487   * Returns zero or a negative error.
c790e8e1817f1a Xuan Zhuo          2022-08-01  2488   * 0: success.
c790e8e1817f1a Xuan Zhuo          2022-08-01  2489   * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size.
c790e8e1817f1a Xuan Zhuo          2022-08-01  2490   *  vq can still work normally
c790e8e1817f1a Xuan Zhuo          2022-08-01  2491   * -EBUSY: Failed to sync with device, vq may not work properly
c790e8e1817f1a Xuan Zhuo          2022-08-01  2492   * -ENOENT: Transport or device not supported
c790e8e1817f1a Xuan Zhuo          2022-08-01  2493   * -E2BIG/-EINVAL: num error
c790e8e1817f1a Xuan Zhuo          2022-08-01  2494   * -EPERM: Operation not permitted
c790e8e1817f1a Xuan Zhuo          2022-08-01  2495   *
c790e8e1817f1a Xuan Zhuo          2022-08-01  2496   */
146086b281eebe zhenwei pi         2023-05-17  2497  static int vring_virtqueue_resize(struct virtqueue *_vq, u32 num,
c790e8e1817f1a Xuan Zhuo          2022-08-01  2498  				  void (*recycle)(struct virtqueue *vq, void *buf))
c790e8e1817f1a Xuan Zhuo          2022-08-01 @2499  {
c790e8e1817f1a Xuan Zhuo          2022-08-01  2500  	struct vring_virtqueue *vq = to_vvq(_vq);
c790e8e1817f1a Xuan Zhuo          2022-08-01  2501  	struct virtio_device *vdev = vq->vq.vdev;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2502  	void *buf;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2503  	int err;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2504  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2505  	if (!vq->we_own_ring)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2506  		return -EPERM;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2507  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2508  	if (num > vq->vq.num_max)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2509  		return -E2BIG;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2510  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2511  	if (!num)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2512  		return -EINVAL;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2513  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2514  	if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2515  		return 0;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2516  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2517  	if (!vdev->config->disable_vq_and_reset)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2518  		return -ENOENT;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2519  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2520  	if (!vdev->config->enable_vq_after_reset)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2521  		return -ENOENT;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2522  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2523  	err = vdev->config->disable_vq_and_reset(_vq);
c790e8e1817f1a Xuan Zhuo          2022-08-01  2524  	if (err)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2525  		return err;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2526  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2527  	while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2528  		recycle(_vq, buf);
c790e8e1817f1a Xuan Zhuo          2022-08-01  2529  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2530  	if (vq->packed_ring)
c790e8e1817f1a Xuan Zhuo          2022-08-01  2531  		err = virtqueue_resize_packed(_vq, num);
c790e8e1817f1a Xuan Zhuo          2022-08-01  2532  	else
c790e8e1817f1a Xuan Zhuo          2022-08-01  2533  		err = virtqueue_resize_split(_vq, num);
c790e8e1817f1a Xuan Zhuo          2022-08-01  2534  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2535  	if (vdev->config->enable_vq_after_reset(_vq))
c790e8e1817f1a Xuan Zhuo          2022-08-01  2536  		return -EBUSY;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2537  
c790e8e1817f1a Xuan Zhuo          2022-08-01  2538  	return err;
c790e8e1817f1a Xuan Zhuo          2022-08-01  2539  }
c790e8e1817f1a Xuan Zhuo          2022-08-01  2540
  
Christoph Hellwig May 17, 2023, 7:39 a.m. UTC | #3
On Wed, May 17, 2023 at 10:54:23AM +0800, zhenwei pi wrote:
> All the vring based virtqueue methods could be abstract in theory,
> MST suggested that add/get bufs and kick functions are quite performance
> sensitive, so export these functions from virtio_ring.ko, drivers
> still call them in a fast path.

Who is going to use this?  And why do you think every virtio users
would want to pay for indirect calls just for your shiny new feature?

This seems like an amazingly bad idea to me.
  
zhenwei pi May 17, 2023, 7:43 a.m. UTC | #4
On 5/17/23 15:39, Christoph Hellwig wrote:
> On Wed, May 17, 2023 at 10:54:23AM +0800, zhenwei pi wrote:
>> All the vring based virtqueue methods could be abstract in theory,
>> MST suggested that add/get bufs and kick functions are quite performance
>> sensitive, so export these functions from virtio_ring.ko, drivers
>> still call them in a fast path.
> 
> Who is going to use this?  And why do you think every virtio users
> would want to pay for indirect calls just for your shiny new feature?
> 
> This seems like an amazingly bad idea to me.

Hi,

I have a plan to introduce 'Virtio Over Fabrics'(TCP&RDMA) as Virtio 
transport, as mentioned in cover letter of this series:
3 weeks ago, I posted a proposal 'Virtio Over Fabrics':
https://lists.oasis-open.org/archives/virtio-comment/202304/msg00442.html

Jason and Stefan pointed out that a non-vring based virtqueue has a
chance to overwrite virtqueue instead of using vring virtqueue.
  
Christoph Hellwig May 17, 2023, 7:46 a.m. UTC | #5
On Wed, May 17, 2023 at 03:43:03PM +0800, zhenwei pi wrote:
> I have a plan to introduce 'Virtio Over Fabrics'(TCP&RDMA) as Virtio
> transport, as mentioned in cover letter of this series:
> 3 weeks ago, I posted a proposal 'Virtio Over Fabrics':
> https://lists.oasis-open.org/archives/virtio-comment/202304/msg00442.html

Just don't do it.  Please define your own protocols over RDMA or TCP
for exactly the operations you need (for many they will already exist)
instead of piggybacking on virtio and making everyone else pay the
price.
  
zhenwei pi May 17, 2023, 8:35 a.m. UTC | #6
On 5/17/23 15:46, Christoph Hellwig wrote:
> On Wed, May 17, 2023 at 03:43:03PM +0800, zhenwei pi wrote:
>> I have a plan to introduce 'Virtio Over Fabrics'(TCP&RDMA) as Virtio
>> transport, as mentioned in cover letter of this series:
>> 3 weeks ago, I posted a proposal 'Virtio Over Fabrics':
>> https://lists.oasis-open.org/archives/virtio-comment/202304/msg00442.html
> 
> Just don't do it.  Please define your own protocols over RDMA or TCP
> for exactly the operations you need (for many they will already exist)
> instead of piggybacking on virtio and making everyone else pay the
> price.
> 

Hi

1, `virtqueue_add_inbuf` in current version:
static inline int virtqueue_add_inbuf(struct virtqueue *vq,
                                       struct scatterlist *sg,
                                       unsigned int num,
                                       void *data,
                                       gfp_t gfp)
{
         if (likely(!vq->abstract))
                 return vring_virtqueue_add_sgs(vq, &sg, num, 0, 1, 
data, NULL, gfp);

         return vq->add_sgs(vq, &sg, num, 0, 1, data, NULL, gfp);
}

And disassemble 'virtinput_queue_evtbuf':
static void virtinput_queue_evtbuf(struct virtio_input *vi,
                                    struct virtio_input_event *evtbuf)
{
         struct scatterlist sg[1];

         sg_init_one(sg, evtbuf, sizeof(*evtbuf));
         virtqueue_add_inbuf(vi->evt, sg, 1, evtbuf, GFP_ATOMIC);
}

I notice that two instructions are newly added for vring like:
  24d:   80 78 35 00             cmpb   $0x0,0x35(%rax)
  251:   75 3f                   jne    292

Is it an expensive price...

2, Storage/FS specific remote protocol is quite popular, otherwise I'm 
not familiar with other device protocols. For example, I need a remote 
crypto device to accelerate HTTPS ... With Virtio Over Fabrics, I have a 
chance to attach a virtio-crypto device to do this work.
  
Michael S. Tsirkin May 17, 2023, 10:39 a.m. UTC | #7
On Wed, May 17, 2023 at 04:35:55PM +0800, zhenwei pi wrote:
> 
> 
> On 5/17/23 15:46, Christoph Hellwig wrote:
> > On Wed, May 17, 2023 at 03:43:03PM +0800, zhenwei pi wrote:
> > > I have a plan to introduce 'Virtio Over Fabrics'(TCP&RDMA) as Virtio
> > > transport, as mentioned in cover letter of this series:
> > > 3 weeks ago, I posted a proposal 'Virtio Over Fabrics':
> > > https://lists.oasis-open.org/archives/virtio-comment/202304/msg00442.html
> > 
> > Just don't do it.  Please define your own protocols over RDMA or TCP
> > for exactly the operations you need (for many they will already exist)
> > instead of piggybacking on virtio and making everyone else pay the
> > price.
> > 
> 
> Hi
> 
> 1, `virtqueue_add_inbuf` in current version:
> static inline int virtqueue_add_inbuf(struct virtqueue *vq,
>                                       struct scatterlist *sg,
>                                       unsigned int num,
>                                       void *data,
>                                       gfp_t gfp)
> {
>         if (likely(!vq->abstract))
>                 return vring_virtqueue_add_sgs(vq, &sg, num, 0, 1, data,
> NULL, gfp);
> 
>         return vq->add_sgs(vq, &sg, num, 0, 1, data, NULL, gfp);
> }
> 
> And disassemble 'virtinput_queue_evtbuf':
> static void virtinput_queue_evtbuf(struct virtio_input *vi,
>                                    struct virtio_input_event *evtbuf)
> {
>         struct scatterlist sg[1];
> 
>         sg_init_one(sg, evtbuf, sizeof(*evtbuf));
>         virtqueue_add_inbuf(vi->evt, sg, 1, evtbuf, GFP_ATOMIC);
> }
> 
> I notice that two instructions are newly added for vring like:
>  24d:   80 78 35 00             cmpb   $0x0,0x35(%rax)
>  251:   75 3f                   jne    292
> 
> Is it an expensive price...

Can we somehow only override the kick method?
Then take the ring and send it over ...


> 2, Storage/FS specific remote protocol is quite popular, otherwise I'm not
> familiar with other device protocols. For example, I need a remote crypto
> device to accelerate HTTPS ... With Virtio Over Fabrics, I have a chance to
> attach a virtio-crypto device to do this work.
> 
> -- 
> zhenwei pi
  
kernel test robot May 17, 2023, 5:59 p.m. UTC | #8
Hi zhenwei,

kernel test robot noticed the following build errors:

[auto build test ERROR on mst-vhost/linux-next]
[also build test ERROR on linus/master v6.4-rc2 next-20230517]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/zhenwei-pi/virtio-abstract-virtqueue-related-methods/20230517-110311
base:   https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git linux-next
patch link:    https://lore.kernel.org/r/20230517025424.601141-2-pizhenwei%40bytedance.com
patch subject: [PATCH v2 1/2] virtio: abstract virtqueue related methods
config: loongarch-randconfig-r004-20230517
compiler: loongarch64-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/146086b281eebe5c5368c387f96a0395c6252d41
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review zhenwei-pi/virtio-abstract-virtqueue-related-methods/20230517-110311
        git checkout 146086b281eebe5c5368c387f96a0395c6252d41
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=loongarch olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=loongarch SHELL=/bin/bash

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202305180109.U6SyyRS2-lkp@intel.com/

All errors (new ones prefixed by >>):

   In file included from drivers/gpu/drm/virtio/virtgpu_drv.h:30,
                    from drivers/gpu/drm/virtio/virtgpu_drv.c:41:
   include/linux/virtio.h: In function 'virtio_break_device':
>> include/linux/virtio.h:492:19: error: 'struct virtqueue' has no member named '__builtin_loongarch_break'
     492 |                 vq->__break(vq);
         |                   ^~


vim +492 include/linux/virtio.h

   481	
   482	/*
   483	 * This should prevent the device from being used, allowing drivers to
   484	 * recover.  You may need to grab appropriate locks to flush.
   485	 */
   486	static inline void virtio_break_device(struct virtio_device *dev)
   487	{
   488		struct virtqueue *vq;
   489	
   490		spin_lock(&dev->vqs_list_lock);
   491		list_for_each_entry(vq, &dev->vqs, list) {
 > 492			vq->__break(vq);
   493		}
   494		spin_unlock(&dev->vqs_list_lock);
   495	}
   496
  
zhenwei pi May 18, 2023, 12:47 a.m. UTC | #9
On 5/17/23 18:39, Michael S. Tsirkin wrote:
> On Wed, May 17, 2023 at 04:35:55PM +0800, zhenwei pi wrote:
>>
>>
>> On 5/17/23 15:46, Christoph Hellwig wrote:
>>> On Wed, May 17, 2023 at 03:43:03PM +0800, zhenwei pi wrote:
>>>> I have a plan to introduce 'Virtio Over Fabrics'(TCP&RDMA) as Virtio
>>>> transport, as mentioned in cover letter of this series:
>>>> 3 weeks ago, I posted a proposal 'Virtio Over Fabrics':
>>>> https://lists.oasis-open.org/archives/virtio-comment/202304/msg00442.html
>>>
>>> Just don't do it.  Please define your own protocols over RDMA or TCP
>>> for exactly the operations you need (for many they will already exist)
>>> instead of piggybacking on virtio and making everyone else pay the
>>> price.
>>>
>>
>> Hi
>>
>> 1, `virtqueue_add_inbuf` in current version:
>> static inline int virtqueue_add_inbuf(struct virtqueue *vq,
>>                                        struct scatterlist *sg,
>>                                        unsigned int num,
>>                                        void *data,
>>                                        gfp_t gfp)
>> {
>>          if (likely(!vq->abstract))
>>                  return vring_virtqueue_add_sgs(vq, &sg, num, 0, 1, data,
>> NULL, gfp);
>>
>>          return vq->add_sgs(vq, &sg, num, 0, 1, data, NULL, gfp);
>> }
>>
>> And disassemble 'virtinput_queue_evtbuf':
>> static void virtinput_queue_evtbuf(struct virtio_input *vi,
>>                                     struct virtio_input_event *evtbuf)
>> {
>>          struct scatterlist sg[1];
>>
>>          sg_init_one(sg, evtbuf, sizeof(*evtbuf));
>>          virtqueue_add_inbuf(vi->evt, sg, 1, evtbuf, GFP_ATOMIC);
>> }
>>
>> I notice that two instructions are newly added for vring like:
>>   24d:   80 78 35 00             cmpb   $0x0,0x35(%rax)
>>   251:   75 3f                   jne    292
>>
>> Is it an expensive price...
> 
> Can we somehow only override the kick method?
> Then take the ring and send it over ...
> 

Could you please take a look at this code?
https://github.com/pizhenwei/linux/blob/virtio-of-github/drivers/virtio/virtio_fabrics.c#LL861C13-L861C23

> 
>> 2, Storage/FS specific remote protocol is quite popular, otherwise I'm not
>> familiar with other device protocols. For example, I need a remote crypto
>> device to accelerate HTTPS ... With Virtio Over Fabrics, I have a chance to
>> attach a virtio-crypto device to do this work.
>>
>> -- 
>> zhenwei pi
>
  
Michael S. Tsirkin May 18, 2023, 10:09 a.m. UTC | #10
On Thu, May 18, 2023 at 08:47:22AM +0800, zhenwei pi wrote:
> On 5/17/23 18:39, Michael S. Tsirkin wrote:
> > On Wed, May 17, 2023 at 04:35:55PM +0800, zhenwei pi wrote:
> > > 
> > > 
> > > On 5/17/23 15:46, Christoph Hellwig wrote:
> > > > On Wed, May 17, 2023 at 03:43:03PM +0800, zhenwei pi wrote:
> > > > > I have a plan to introduce 'Virtio Over Fabrics'(TCP&RDMA) as Virtio
> > > > > transport, as mentioned in cover letter of this series:
> > > > > 3 weeks ago, I posted a proposal 'Virtio Over Fabrics':
> > > > > https://lists.oasis-open.org/archives/virtio-comment/202304/msg00442.html
> > > > 
> > > > Just don't do it.  Please define your own protocols over RDMA or TCP
> > > > for exactly the operations you need (for many they will already exist)
> > > > instead of piggybacking on virtio and making everyone else pay the
> > > > price.
> > > > 
> > > 
> > > Hi
> > > 
> > > 1, `virtqueue_add_inbuf` in current version:
> > > static inline int virtqueue_add_inbuf(struct virtqueue *vq,
> > >                                        struct scatterlist *sg,
> > >                                        unsigned int num,
> > >                                        void *data,
> > >                                        gfp_t gfp)
> > > {
> > >          if (likely(!vq->abstract))
> > >                  return vring_virtqueue_add_sgs(vq, &sg, num, 0, 1, data,
> > > NULL, gfp);
> > > 
> > >          return vq->add_sgs(vq, &sg, num, 0, 1, data, NULL, gfp);
> > > }
> > > 
> > > And disassemble 'virtinput_queue_evtbuf':
> > > static void virtinput_queue_evtbuf(struct virtio_input *vi,
> > >                                     struct virtio_input_event *evtbuf)
> > > {
> > >          struct scatterlist sg[1];
> > > 
> > >          sg_init_one(sg, evtbuf, sizeof(*evtbuf));
> > >          virtqueue_add_inbuf(vi->evt, sg, 1, evtbuf, GFP_ATOMIC);
> > > }
> > > 
> > > I notice that two instructions are newly added for vring like:
> > >   24d:   80 78 35 00             cmpb   $0x0,0x35(%rax)
> > >   251:   75 3f                   jne    292
> > > 
> > > Is it an expensive price...
> > 
> > Can we somehow only override the kick method?
> > Then take the ring and send it over ...
> > 
> 
> Could you please take a look at this code?
> https://github.com/pizhenwei/linux/blob/virtio-of-github/drivers/virtio/virtio_fabrics.c#LL861C13-L861C23

what am I looking at here?

Looks like at least vof_handle_vq duplicates some code from vringh.
But besides that yes, that's the idea.


> > 
> > > 2, Storage/FS specific remote protocol is quite popular, otherwise I'm not
> > > familiar with other device protocols. For example, I need a remote crypto
> > > device to accelerate HTTPS ... With Virtio Over Fabrics, I have a chance to
> > > attach a virtio-crypto device to do this work.
> > > 
> > > -- 
> > > zhenwei pi
> > 
> 
> -- 
> zhenwei pi
  
zhenwei pi May 18, 2023, 11:01 a.m. UTC | #11
On 5/18/23 18:09, Michael S. Tsirkin wrote:
> On Thu, May 18, 2023 at 08:47:22AM +0800, zhenwei pi wrote:
>> On 5/17/23 18:39, Michael S. Tsirkin wrote:
>>> On Wed, May 17, 2023 at 04:35:55PM +0800, zhenwei pi wrote:
>>>>
>>>>
>>>> On 5/17/23 15:46, Christoph Hellwig wrote:
>>>>> On Wed, May 17, 2023 at 03:43:03PM +0800, zhenwei pi wrote:
>>>>>> I have a plan to introduce 'Virtio Over Fabrics'(TCP&RDMA) as Virtio
>>>>>> transport, as mentioned in cover letter of this series:
>>>>>> 3 weeks ago, I posted a proposal 'Virtio Over Fabrics':
>>>>>> https://lists.oasis-open.org/archives/virtio-comment/202304/msg00442.html
>>>>>
>>>>> Just don't do it.  Please define your own protocols over RDMA or TCP
>>>>> for exactly the operations you need (for many they will already exist)
>>>>> instead of piggybacking on virtio and making everyone else pay the
>>>>> price.
>>>>>
>>>>
>>>> Hi
>>>>
>>>> 1, `virtqueue_add_inbuf` in current version:
>>>> static inline int virtqueue_add_inbuf(struct virtqueue *vq,
>>>>                                         struct scatterlist *sg,
>>>>                                         unsigned int num,
>>>>                                         void *data,
>>>>                                         gfp_t gfp)
>>>> {
>>>>           if (likely(!vq->abstract))
>>>>                   return vring_virtqueue_add_sgs(vq, &sg, num, 0, 1, data,
>>>> NULL, gfp);
>>>>
>>>>           return vq->add_sgs(vq, &sg, num, 0, 1, data, NULL, gfp);
>>>> }
>>>>
>>>> And disassemble 'virtinput_queue_evtbuf':
>>>> static void virtinput_queue_evtbuf(struct virtio_input *vi,
>>>>                                      struct virtio_input_event *evtbuf)
>>>> {
>>>>           struct scatterlist sg[1];
>>>>
>>>>           sg_init_one(sg, evtbuf, sizeof(*evtbuf));
>>>>           virtqueue_add_inbuf(vi->evt, sg, 1, evtbuf, GFP_ATOMIC);
>>>> }
>>>>
>>>> I notice that two instructions are newly added for vring like:
>>>>    24d:   80 78 35 00             cmpb   $0x0,0x35(%rax)
>>>>    251:   75 3f                   jne    292
>>>>
>>>> Is it an expensive price...
>>>
>>> Can we somehow only override the kick method?
>>> Then take the ring and send it over ...
>>>
>>
>> Could you please take a look at this code?
>> https://github.com/pizhenwei/linux/blob/virtio-of-github/drivers/virtio/virtio_fabrics.c#LL861C13-L861C23
> 
> what am I looking at here?
> 
> Looks like at least vof_handle_vq duplicates some code from vringh.
> But besides that yes, that's the idea.
> 

OK, I'd drop this series.

Cc Jason & Stefan.
  

Patch

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index c5310eaf8b46..82c26f50d941 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -226,6 +226,7 @@  static struct virtqueue *__vring_new_virtqueue(unsigned int index,
 					       struct device *dma_dev);
 static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
 static void vring_free(struct virtqueue *_vq);
+static void vring_virtqueue_set_ops(struct virtqueue *vq);
 
 /*
  * Helpers.
@@ -2036,6 +2037,7 @@  static struct virtqueue *vring_create_virtqueue_packed(
 	if (!vq)
 		goto err_vq;
 
+	vring_virtqueue_set_ops(&vq->vq);
 	vq->vq.callback = callback;
 	vq->vq.vdev = vdev;
 	vq->vq.name = name;
@@ -2117,14 +2119,14 @@  static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
  * Generic functions and exported symbols.
  */
 
-static inline int virtqueue_add(struct virtqueue *_vq,
-				struct scatterlist *sgs[],
-				unsigned int total_sg,
-				unsigned int out_sgs,
-				unsigned int in_sgs,
-				void *data,
-				void *ctx,
-				gfp_t gfp)
+int vring_virtqueue_add_sgs(struct virtqueue *_vq,
+			    struct scatterlist *sgs[],
+			    unsigned int total_sg,
+			    unsigned int out_sgs,
+			    unsigned int in_sgs,
+			    void *data,
+			    void *ctx,
+			    gfp_t gfp)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2133,112 +2135,10 @@  static inline int virtqueue_add(struct virtqueue *_vq,
 				 virtqueue_add_split(_vq, sgs, total_sg,
 					out_sgs, in_sgs, data, ctx, gfp);
 }
+EXPORT_SYMBOL_GPL(vring_virtqueue_add_sgs);
 
 /**
- * virtqueue_add_sgs - expose buffers to other end
- * @_vq: the struct virtqueue we're talking about.
- * @sgs: array of terminated scatterlists.
- * @out_sgs: the number of scatterlists readable by other side
- * @in_sgs: the number of scatterlists which are writable (after readable ones)
- * @data: the token identifying the buffer.
- * @gfp: how to do memory allocations (if necessary).
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
- */
-int virtqueue_add_sgs(struct virtqueue *_vq,
-		      struct scatterlist *sgs[],
-		      unsigned int out_sgs,
-		      unsigned int in_sgs,
-		      void *data,
-		      gfp_t gfp)
-{
-	unsigned int i, total_sg = 0;
-
-	/* Count them first. */
-	for (i = 0; i < out_sgs + in_sgs; i++) {
-		struct scatterlist *sg;
-
-		for (sg = sgs[i]; sg; sg = sg_next(sg))
-			total_sg++;
-	}
-	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
-			     data, NULL, gfp);
-}
-EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
-
-/**
- * virtqueue_add_outbuf - expose output buffers to other end
- * @vq: the struct virtqueue we're talking about.
- * @sg: scatterlist (must be well-formed and terminated!)
- * @num: the number of entries in @sg readable by other side
- * @data: the token identifying the buffer.
- * @gfp: how to do memory allocations (if necessary).
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
- */
-int virtqueue_add_outbuf(struct virtqueue *vq,
-			 struct scatterlist *sg, unsigned int num,
-			 void *data,
-			 gfp_t gfp)
-{
-	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
-}
-EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
-
-/**
- * virtqueue_add_inbuf - expose input buffers to other end
- * @vq: the struct virtqueue we're talking about.
- * @sg: scatterlist (must be well-formed and terminated!)
- * @num: the number of entries in @sg writable by other side
- * @data: the token identifying the buffer.
- * @gfp: how to do memory allocations (if necessary).
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
- */
-int virtqueue_add_inbuf(struct virtqueue *vq,
-			struct scatterlist *sg, unsigned int num,
-			void *data,
-			gfp_t gfp)
-{
-	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
-}
-EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
-
-/**
- * virtqueue_add_inbuf_ctx - expose input buffers to other end
- * @vq: the struct virtqueue we're talking about.
- * @sg: scatterlist (must be well-formed and terminated!)
- * @num: the number of entries in @sg writable by other side
- * @data: the token identifying the buffer.
- * @ctx: extra context for the token
- * @gfp: how to do memory allocations (if necessary).
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
- */
-int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
-			struct scatterlist *sg, unsigned int num,
-			void *data,
-			void *ctx,
-			gfp_t gfp)
-{
-	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
-}
-EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
-
-/**
- * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * vring_virtqueue_kick_prepare - first half of split virtqueue_kick call.
  * @_vq: the struct virtqueue
  *
  * Instead of virtqueue_kick(), you can do:
@@ -2248,24 +2148,24 @@  EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
  * This is sometimes useful because the virtqueue_kick_prepare() needs
  * to be serialized, but the actual virtqueue_notify() call does not.
  */
-bool virtqueue_kick_prepare(struct virtqueue *_vq)
+bool vring_virtqueue_kick_prepare(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
 	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
 				 virtqueue_kick_prepare_split(_vq);
 }
-EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
+EXPORT_SYMBOL_GPL(vring_virtqueue_kick_prepare);
 
 /**
- * virtqueue_notify - second half of split virtqueue_kick call.
+ * vring_virtqueue_notify - second half of split virtqueue_kick call.
  * @_vq: the struct virtqueue
  *
  * This does not need to be serialized.
  *
  * Returns false if host notify failed or queue is broken, otherwise true.
  */
-bool virtqueue_notify(struct virtqueue *_vq)
+bool vring_virtqueue_notify(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2279,30 +2179,10 @@  bool virtqueue_notify(struct virtqueue *_vq)
 	}
 	return true;
 }
-EXPORT_SYMBOL_GPL(virtqueue_notify);
+EXPORT_SYMBOL_GPL(vring_virtqueue_notify);
 
 /**
- * virtqueue_kick - update after add_buf
- * @vq: the struct virtqueue
- *
- * After one or more virtqueue_add_* calls, invoke this to kick
- * the other side.
- *
- * Caller must ensure we don't call this with other virtqueue
- * operations at the same time (except where noted).
- *
- * Returns false if kick failed, otherwise true.
- */
-bool virtqueue_kick(struct virtqueue *vq)
-{
-	if (virtqueue_kick_prepare(vq))
-		return virtqueue_notify(vq);
-	return true;
-}
-EXPORT_SYMBOL_GPL(virtqueue_kick);
-
-/**
- * virtqueue_get_buf_ctx - get the next used buffer
+ * vring_virtqueue_get_buf_ctx - get the next used buffer
  * @_vq: the struct virtqueue we're talking about.
  * @len: the length written into the buffer
  * @ctx: extra context for the token
@@ -2318,23 +2198,17 @@  EXPORT_SYMBOL_GPL(virtqueue_kick);
  * Returns NULL if there are no used buffers, or the "data" token
  * handed to virtqueue_add_*().
  */
-void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
-			    void **ctx)
+void *vring_virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len, void **ctx)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
 	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
 				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
 }
-EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
+EXPORT_SYMBOL_GPL(vring_virtqueue_get_buf_ctx);
 
-void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
-{
-	return virtqueue_get_buf_ctx(_vq, len, NULL);
-}
-EXPORT_SYMBOL_GPL(virtqueue_get_buf);
 /**
- * virtqueue_disable_cb - disable callbacks
+ * vring_virtqueue_disable_cb - disable callbacks
  * @_vq: the struct virtqueue we're talking about.
  *
  * Note that this is not necessarily synchronous, hence unreliable and only
@@ -2342,7 +2216,7 @@  EXPORT_SYMBOL_GPL(virtqueue_get_buf);
  *
  * Unlike other operations, this need not be serialized.
  */
-void virtqueue_disable_cb(struct virtqueue *_vq)
+void vring_virtqueue_disable_cb(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2351,10 +2225,10 @@  void virtqueue_disable_cb(struct virtqueue *_vq)
 	else
 		virtqueue_disable_cb_split(_vq);
 }
-EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
+EXPORT_SYMBOL_GPL(vring_virtqueue_disable_cb);
 
 /**
- * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
+ * vring_virtqueue_enable_cb_prepare - restart callbacks after disable_cb
  * @_vq: the struct virtqueue we're talking about.
  *
  * This re-enables callbacks; it returns current queue state
@@ -2365,7 +2239,7 @@  EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
  */
-unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+unsigned int vring_virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2375,18 +2249,18 @@  unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
 				 virtqueue_enable_cb_prepare_split(_vq);
 }
-EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
+EXPORT_SYMBOL_GPL(vring_virtqueue_enable_cb_prepare);
 
 /**
- * virtqueue_poll - query pending used buffers
+ * vring_virtqueue_poll - query pending used buffers
  * @_vq: the struct virtqueue we're talking about.
- * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
+ * @last_used_idx: virtqueue state (from call to vring_virtqueue_enable_cb_prepare).
  *
  * Returns "true" if there are pending used buffers in the queue.
  *
  * This does not need to be serialized.
  */
-bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
+bool vring_virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2397,29 +2271,10 @@  bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
 	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
 				 virtqueue_poll_split(_vq, last_used_idx);
 }
-EXPORT_SYMBOL_GPL(virtqueue_poll);
-
-/**
- * virtqueue_enable_cb - restart callbacks after disable_cb.
- * @_vq: the struct virtqueue we're talking about.
- *
- * This re-enables callbacks; it returns "false" if there are pending
- * buffers in the queue, to detect a possible race between the driver
- * checking for more work, and enabling callbacks.
- *
- * Caller must ensure we don't call this with other virtqueue
- * operations at the same time (except where noted).
- */
-bool virtqueue_enable_cb(struct virtqueue *_vq)
-{
-	unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);
-
-	return !virtqueue_poll(_vq, last_used_idx);
-}
-EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
+EXPORT_SYMBOL_GPL(vring_virtqueue_poll);
 
 /**
- * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
+ * vring_virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
  * @_vq: the struct virtqueue we're talking about.
  *
  * This re-enables callbacks but hints to the other side to delay
@@ -2431,7 +2286,7 @@  EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
  */
-bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+bool vring_virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2441,7 +2296,7 @@  bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
 				 virtqueue_enable_cb_delayed_split(_vq);
 }
-EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
+EXPORT_SYMBOL_GPL(vring_virtqueue_enable_cb_delayed);
 
 /**
  * virtqueue_detach_unused_buf - detach first unused buffer
@@ -2451,14 +2306,13 @@  EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
  * This is not valid on an active queue; it is useful for device
  * shutdown or the reset queue.
  */
-void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
+static void *vring_virtqueue_detach_unused_buf(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
 	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
 				 virtqueue_detach_unused_buf_split(_vq);
 }
-EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
 
 static inline bool more_used(const struct vring_virtqueue *vq)
 {
@@ -2525,6 +2379,7 @@  static struct virtqueue *__vring_new_virtqueue(unsigned int index,
 	if (!vq)
 		return NULL;
 
+	vring_virtqueue_set_ops(&vq->vq);
 	vq->packed_ring = false;
 	vq->vq.callback = callback;
 	vq->vq.vdev = vdev;
@@ -2639,8 +2494,8 @@  EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
  * -EPERM: Operation not permitted
  *
  */
-int virtqueue_resize(struct virtqueue *_vq, u32 num,
-		     void (*recycle)(struct virtqueue *vq, void *buf))
+static int vring_virtqueue_resize(struct virtqueue *_vq, u32 num,
+				  void (*recycle)(struct virtqueue *vq, void *buf))
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	struct virtio_device *vdev = vq->vq.vdev;
@@ -2682,7 +2537,6 @@  int virtqueue_resize(struct virtqueue *_vq, u32 num,
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(virtqueue_resize);
 
 /* Only available for split ring */
 struct virtqueue *vring_new_virtqueue(unsigned int index,
@@ -2815,14 +2669,13 @@  EXPORT_SYMBOL_GPL(vring_transport_features);
  * Returns the size of the vring.  This is mainly used for boasting to
  * userspace.  Unlike other operations, this need not be serialized.
  */
-unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
+static unsigned int vring_virtqueue_get_vring_size(const struct virtqueue *_vq)
 {
 
 	const struct vring_virtqueue *vq = to_vvq(_vq);
 
 	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
 }
-EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
 
 /*
  * This function should only be called by the core, not directly by the driver.
@@ -2831,7 +2684,7 @@  void __virtqueue_break(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
-	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+	/* Pairs with READ_ONCE() in vring_virtqueue_is_broken(). */
 	WRITE_ONCE(vq->broken, true);
 }
 EXPORT_SYMBOL_GPL(__virtqueue_break);
@@ -2843,59 +2696,18 @@  void __virtqueue_unbreak(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
-	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+	/* Pairs with READ_ONCE() in vring_virtqueue_is_broken(). */
 	WRITE_ONCE(vq->broken, false);
 }
 EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
 
-bool virtqueue_is_broken(const struct virtqueue *_vq)
+bool vring_virtqueue_is_broken(const struct virtqueue *_vq)
 {
 	const struct vring_virtqueue *vq = to_vvq(_vq);
 
 	return READ_ONCE(vq->broken);
 }
-EXPORT_SYMBOL_GPL(virtqueue_is_broken);
-
-/*
- * This should prevent the device from being used, allowing drivers to
- * recover.  You may need to grab appropriate locks to flush.
- */
-void virtio_break_device(struct virtio_device *dev)
-{
-	struct virtqueue *_vq;
-
-	spin_lock(&dev->vqs_list_lock);
-	list_for_each_entry(_vq, &dev->vqs, list) {
-		struct vring_virtqueue *vq = to_vvq(_vq);
-
-		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
-		WRITE_ONCE(vq->broken, true);
-	}
-	spin_unlock(&dev->vqs_list_lock);
-}
-EXPORT_SYMBOL_GPL(virtio_break_device);
-
-/*
- * This should allow the device to be used by the driver. You may
- * need to grab appropriate locks to flush the write to
- * vq->broken. This should only be used in some specific case e.g
- * (probing and restoring). This function should only be called by the
- * core, not directly by the driver.
- */
-void __virtio_unbreak_device(struct virtio_device *dev)
-{
-	struct virtqueue *_vq;
-
-	spin_lock(&dev->vqs_list_lock);
-	list_for_each_entry(_vq, &dev->vqs, list) {
-		struct vring_virtqueue *vq = to_vvq(_vq);
-
-		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
-		WRITE_ONCE(vq->broken, false);
-	}
-	spin_unlock(&dev->vqs_list_lock);
-}
-EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
+EXPORT_SYMBOL_GPL(vring_virtqueue_is_broken);
 
 dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
 {
@@ -2946,3 +2758,16 @@  const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
 EXPORT_SYMBOL_GPL(virtqueue_get_vring);
 
 MODULE_LICENSE("GPL");
+
+static void vring_virtqueue_set_ops(struct virtqueue *vq)
+{
+	/* In theory, the add/get buf and kick functions could also be overridden;
+	 * these functions are quite performance sensitive, so call them directly.
+	 */
+	vq->abstract = false;
+	vq->detach_unused_buf = vring_virtqueue_detach_unused_buf;
+	vq->get_vring_size = vring_virtqueue_get_vring_size;
+	vq->resize = vring_virtqueue_resize;
+	vq->__break = __virtqueue_break;
+	vq->__unbreak = __virtqueue_unbreak;
+}
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index b93238db94e3..203f0397dc58 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -9,6 +9,7 @@ 
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 #include <linux/gfp.h>
+#include <linux/virtio_ring.h>
 
 /**
  * struct virtqueue - a queue to register buffers for sending or receiving.
@@ -35,67 +36,399 @@  struct virtqueue {
 	unsigned int num_free;
 	unsigned int num_max;
 	bool reset;
+	bool abstract;
 	void *priv;
+
+	/* abstract operations */
+	int (*add_sgs)(struct virtqueue *vq, struct scatterlist *sgs[],
+		       unsigned int total_sg,
+		       unsigned int out_sgs, unsigned int in_sgs,
+		       void *data, void *ctx, gfp_t gfp);
+	bool (*kick_prepare)(struct virtqueue *vq);
+	bool (*notify)(struct virtqueue *vq);
+	unsigned int (*enable_cb_prepare)(struct virtqueue *vq);
+	bool (*enable_cb_delayed)(struct virtqueue *vq);
+	void (*disable_cb)(struct virtqueue *vq);
+	bool (*poll)(struct virtqueue *vq, unsigned int idx);
+	void *(*get_buf_ctx)(struct virtqueue *vq, unsigned int *len, void **ctx);
+	void *(*detach_unused_buf)(struct virtqueue *vq);
+	unsigned int (*get_vring_size)(const struct virtqueue *vq);
+	int (*resize)(struct virtqueue *vq, u32 num,
+		      void (*recycle)(struct virtqueue *vq, void *buf));
+	void (*__break)(struct virtqueue *vq);
+	void (*__unbreak)(struct virtqueue *vq);
+	bool (*is_broken)(const struct virtqueue *vq);
 };
 
-int virtqueue_add_outbuf(struct virtqueue *vq,
-			 struct scatterlist sg[], unsigned int num,
-			 void *data,
-			 gfp_t gfp);
+/**
+ * virtqueue_add_sgs - expose buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sgs: array of terminated scatterlists.
+ * @out_sgs: the number of scatterlists readable by other side
+ * @in_sgs: the number of scatterlists which are writable (after readable ones)
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+static inline int virtqueue_add_sgs(struct virtqueue *vq,
+				    struct scatterlist *sgs[],
+				    unsigned int out_sgs,
+				    unsigned int in_sgs,
+				    void *data, gfp_t gfp)
+{
+	unsigned int i, total_sg = 0;
+
+	/* Count them first. */
+	for (i = 0; i < out_sgs + in_sgs; i++) {
+		struct scatterlist *sg;
+
+		for (sg = sgs[i]; sg; sg = sg_next(sg))
+			total_sg++;
+	}
+
+	if (likely(!vq->abstract))
+		return vring_virtqueue_add_sgs(vq, sgs, total_sg, out_sgs, in_sgs, data, NULL, gfp);
+
+	return vq->add_sgs(vq, sgs, total_sg, out_sgs, in_sgs, data, NULL, gfp);
+}
+
+/**
+ * virtqueue_add_outbuf - expose output buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg readable by other side
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+static inline int virtqueue_add_outbuf(struct virtqueue *vq,
+				       struct scatterlist *sg,
+				       unsigned int num,
+				       void *data,
+				       gfp_t gfp)
+{
+	if (likely(!vq->abstract))
+		return vring_virtqueue_add_sgs(vq, &sg, num, 1, 0, data, NULL, gfp);
+
+	return vq->add_sgs(vq, &sg, num, 1, 0, data, NULL, gfp);
+}
 
-int virtqueue_add_inbuf(struct virtqueue *vq,
-			struct scatterlist sg[], unsigned int num,
-			void *data,
-			gfp_t gfp);
+/**
+ * virtqueue_add_inbuf - expose input buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg writable by other side
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+static inline int virtqueue_add_inbuf(struct virtqueue *vq,
+				      struct scatterlist *sg,
+				      unsigned int num,
+				      void *data,
+				      gfp_t gfp)
+{
+	if (likely(!vq->abstract))
+		return vring_virtqueue_add_sgs(vq, &sg, num, 0, 1, data, NULL, gfp);
 
-int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
-			    struct scatterlist sg[], unsigned int num,
-			    void *data,
-			    void *ctx,
-			    gfp_t gfp);
+	return vq->add_sgs(vq, &sg, num, 0, 1, data, NULL, gfp);
+}
 
-int virtqueue_add_sgs(struct virtqueue *vq,
-		      struct scatterlist *sgs[],
-		      unsigned int out_sgs,
-		      unsigned int in_sgs,
-		      void *data,
-		      gfp_t gfp);
+/**
+ * virtqueue_add_inbuf_ctx - expose input buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg writable by other side
+ * @data: the token identifying the buffer.
+ * @ctx: extra context for the token
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+static inline int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
+					  struct scatterlist *sg,
+					  unsigned int num,
+					  void *data,
+					  void *ctx,
+					  gfp_t gfp)
+{
+	if (likely(!vq->abstract))
+		return vring_virtqueue_add_sgs(vq, &sg, num, 0, 1, data, ctx, gfp);
 
-bool virtqueue_kick(struct virtqueue *vq);
+	return vq->add_sgs(vq, &sg, num, 0, 1, data, ctx, gfp);
+}
 
-bool virtqueue_kick_prepare(struct virtqueue *vq);
+/**
+ * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * Instead of virtqueue_kick(), you can do:
+ *	if (virtqueue_kick_prepare(vq))
+ *		virtqueue_notify(vq);
+ *
+ * This is sometimes useful because the virtqueue_kick_prepare() needs
+ * to be serialized, but the actual virtqueue_notify() call does not.
+ */
+static inline bool virtqueue_kick_prepare(struct virtqueue *vq)
+{
+	if (likely(!vq->abstract))
+		return vring_virtqueue_kick_prepare(vq);
 
-bool virtqueue_notify(struct virtqueue *vq);
+	return vq->kick_prepare(vq);
+}
 
-void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
+/**
+ * virtqueue_notify - second half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * This does not need to be serialized.
+ *
+ * Returns false if host notify failed or queue is broken, otherwise true.
+ */
+static inline bool virtqueue_notify(struct virtqueue *vq)
+{
+	if (likely(!vq->abstract))
+		return vring_virtqueue_notify(vq);
 
-void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len,
-			    void **ctx);
+	return vq->notify(vq);
+}
 
-void virtqueue_disable_cb(struct virtqueue *vq);
+/**
+ * virtqueue_kick - update after add_buf
+ * @vq: the struct virtqueue
+ *
+ * After one or more virtqueue_add_* calls, invoke this to kick
+ * the other side.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ *
+ * Returns false if kick failed, otherwise true.
+ */
+static inline bool virtqueue_kick(struct virtqueue *vq)
+{
+	if (virtqueue_kick_prepare(vq))
+		return virtqueue_notify(vq);
 
-bool virtqueue_enable_cb(struct virtqueue *vq);
+	return true;
+}
 
-unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
+/**
+ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns current queue state
+ * in an opaque unsigned value. This value should be later tested by
+ * virtqueue_poll, to detect a possible race between the driver checking for
+ * more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+static inline unsigned int virtqueue_enable_cb_prepare(struct virtqueue *vq)
+{
+	if (likely(!vq->abstract))
+		return vring_virtqueue_enable_cb_prepare(vq);
 
-bool virtqueue_poll(struct virtqueue *vq, unsigned);
+	return vq->enable_cb_prepare(vq);
+}
 
-bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
+/**
+ * virtqueue_poll - query pending used buffers
+ * @vq: the struct virtqueue we're talking about.
+ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
+ *
+ * Returns "true" if there are pending used buffers in the queue.
+ *
+ * This does not need to be serialized.
+ */
+static inline bool virtqueue_poll(struct virtqueue *vq, unsigned int idx)
+{
+	if (likely(!vq->abstract))
+		return vring_virtqueue_poll(vq, idx);
 
-void *virtqueue_detach_unused_buf(struct virtqueue *vq);
+	return vq->poll(vq, idx);
+}
 
-unsigned int virtqueue_get_vring_size(const struct virtqueue *vq);
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+static inline bool virtqueue_enable_cb(struct virtqueue *vq)
+{
+	unsigned int opaque = virtqueue_enable_cb_prepare(vq);
 
-bool virtqueue_is_broken(const struct virtqueue *vq);
+	return !virtqueue_poll(vq, opaque);
+}
+
+/**
+ * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks but hints to the other side to delay
+ * interrupts until most of the available buffers have been processed;
+ * it returns "false" if there are many pending buffers in the queue,
+ * to detect a possible race between the driver checking for more work,
+ * and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+static inline bool virtqueue_enable_cb_delayed(struct virtqueue *vq)
+{
+	if (likely(!vq->abstract))
+		return vring_virtqueue_enable_cb_delayed(vq);
+
+	return vq->enable_cb_delayed(vq);
+}
+
+/**
+ * virtqueue_disable_cb - disable callbacks
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Note that this is not necessarily synchronous, hence unreliable and only
+ * useful as an optimization.
+ *
+ * Unlike other operations, this need not be serialized.
+ */
+static inline void virtqueue_disable_cb(struct virtqueue *vq)
+{
+	if (likely(!vq->abstract)) {
+		vring_virtqueue_disable_cb(vq);
+		return;
+	}
+
+	vq->disable_cb(vq);
+}
+
+/**
+ * virtqueue_get_buf_ctx - get the next used buffer
+ * @vq: the struct virtqueue we're talking about.
+ * @len: the length written into the buffer
+ * @ctx: extra context for the token
+ *
+ * If the device wrote data into the buffer, @len will be set to the
+ * amount written.  This means you don't need to clear the buffer
+ * beforehand to ensure there's no data leakage in the case of short
+ * writes.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ *
+ * Returns NULL if there are no used buffers, or the "data" token
+ * handed to virtqueue_add_*().
+ */
+static inline void *virtqueue_get_buf_ctx(struct virtqueue *vq,
+					  unsigned int *len,
+					  void **ctx)
+{
+	if (likely(!vq->abstract))
+		return vring_virtqueue_get_buf_ctx(vq, len, ctx);
+
+	return vq->get_buf_ctx(vq, len, ctx);
+}
+
+static inline void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
+{
+	if (likely(!vq->abstract))
+		return vring_virtqueue_get_buf_ctx(vq, len, NULL);
+
+	return vq->get_buf_ctx(vq, len, NULL);
+}
+
+/**
+ * virtqueue_detach_unused_buf - detach first unused buffer
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Returns NULL or the "data" token handed to virtqueue_add_*().
+ * This is not valid on an active queue; it is useful for device
+ * shutdown or the reset queue.
+ */
+static inline void *virtqueue_detach_unused_buf(struct virtqueue *vq)
+{
+	return vq->detach_unused_buf(vq);
+}
+
+/**
+ * virtqueue_get_vring_size - return the size of the virtqueue's vring
+ * @vq: the struct virtqueue containing the vring of interest.
+ *
+ * Returns the size of the vring.  This is mainly used for boasting to
+ * userspace.  Unlike other operations, this need not be serialized.
+ */
+static inline unsigned int virtqueue_get_vring_size(const struct virtqueue *vq)
+{
+	return vq->get_vring_size(vq);
+}
+
+/**
+ * virtqueue_resize - resize the vring of vq
+ * @vq: the struct virtqueue we're talking about.
+ * @num: new ring num
+ * @recycle: callback to recycle buffers that are no longer used
+ *
+ * When it is really necessary to create a new vring, it will set the current vq
+ * into the reset state. Then call the passed callback to recycle the buffer
+ * that is no longer used. Only after the new vring is successfully created, the
+ * old vring will be released.
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size.
+ *  vq can still work normally
+ * -EBUSY: Failed to sync with device, vq may not work properly
+ * -ENOENT: Transport or device not supported
+ * -E2BIG/-EINVAL: num error
+ * -EPERM: Operation not permitted
+ *
+ */
+static inline int virtqueue_resize(struct virtqueue *vq, u32 num,
+				   void (*recycle)(struct virtqueue *vq, void *buf))
+{
+	if (vq->resize)
+		return vq->resize(vq, num, recycle);
+
+	return -ENOENT;
+}
+
+static inline bool virtqueue_is_broken(const struct virtqueue *vq)
+{
+	if (likely(!vq->abstract))
+		return vring_virtqueue_is_broken(vq);
+
+	return vq->is_broken(vq);
+}
 
 const struct vring *virtqueue_get_vring(const struct virtqueue *vq);
 dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *vq);
 dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *vq);
 dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq);
 
-int virtqueue_resize(struct virtqueue *vq, u32 num,
-		     void (*recycle)(struct virtqueue *vq, void *buf));
-
 /**
  * struct virtio_device - representation of a device using virtio
  * @index: unique position on the virtio bus
@@ -134,9 +467,6 @@  int register_virtio_device(struct virtio_device *dev);
 void unregister_virtio_device(struct virtio_device *dev);
 bool is_virtio_device(struct device *dev);
 
-void virtio_break_device(struct virtio_device *dev);
-void __virtio_unbreak_device(struct virtio_device *dev);
-
 void __virtqueue_break(struct virtqueue *_vq);
 void __virtqueue_unbreak(struct virtqueue *_vq);
 
@@ -149,6 +479,39 @@  void virtio_reset_device(struct virtio_device *dev);
 
 size_t virtio_max_dma_size(const struct virtio_device *vdev);
 
+/*
+ * This should prevent the device from being used, allowing drivers to
+ * recover.  You may need to grab appropriate locks to flush.
+ */
+static inline void virtio_break_device(struct virtio_device *dev)
+{
+	struct virtqueue *vq;
+
+	spin_lock(&dev->vqs_list_lock);
+	list_for_each_entry(vq, &dev->vqs, list) {
+		vq->__break(vq);
+	}
+	spin_unlock(&dev->vqs_list_lock);
+}
+
+/*
+ * This should allow the device to be used by the driver. You may
+ * need to grab appropriate locks to flush the write to
+ * vq->broken. This should only be used in some specific case e.g
+ * (probing and restoring). This function should only be called by the
+ * core, not directly by the driver.
+ */
+static inline void __virtio_unbreak_device(struct virtio_device *dev)
+{
+	struct virtqueue *vq;
+
+	spin_lock(&dev->vqs_list_lock);
+	list_for_each_entry(vq, &dev->vqs, list) {
+		vq->__unbreak(vq);
+	}
+	spin_unlock(&dev->vqs_list_lock);
+}
+
 #define virtio_device_for_each_vq(vdev, vq) \
 	list_for_each_entry(vq, &vdev->vqs, list)
 
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 9b33df741b63..3cfbc98ab3c6 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -4,6 +4,7 @@ 
 
 #include <asm/barrier.h>
 #include <linux/irqreturn.h>
+#include <linux/scatterlist.h>
 #include <uapi/linux/virtio_ring.h>
 
 /*
@@ -108,6 +109,31 @@  struct virtqueue *vring_new_virtqueue(unsigned int index,
 				      void (*callback)(struct virtqueue *vq),
 				      const char *name);
 
+int vring_virtqueue_add_sgs(struct virtqueue *_vq,
+			    struct scatterlist *sgs[],
+			    unsigned int total_sg,
+			    unsigned int out_sgs,
+			    unsigned int in_sgs,
+			    void *data,
+			    void *ctx,
+			    gfp_t gfp);
+
+bool vring_virtqueue_kick_prepare(struct virtqueue *vq);
+
+bool vring_virtqueue_notify(struct virtqueue *vq);
+
+void *vring_virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len, void **ctx);
+
+void vring_virtqueue_disable_cb(struct virtqueue *vq);
+
+unsigned vring_virtqueue_enable_cb_prepare(struct virtqueue *vq);
+
+bool vring_virtqueue_poll(struct virtqueue *vq, unsigned);
+
+bool vring_virtqueue_enable_cb_delayed(struct virtqueue *vq);
+
+bool vring_virtqueue_is_broken(const struct virtqueue *vq);
+
 /*
  * Destroys a virtqueue.  If created with vring_create_virtqueue, this
  * also frees the ring.