[RFC,v1,1/4] virtio/vsock: rework MSG_PEEK for SOCK_STREAM

Message ID 20230618062451.79980-2-AVKrasnov@sberdevices.ru
State New
Series virtio/vsock: some updates for MSG_PEEK flag

Commit Message

Arseniy Krasnov June 18, 2023, 6:24 a.m. UTC
This reworks the current implementation of the MSG_PEEK logic:
1) Replaces 'skb_queue_walk_safe()' with 'skb_queue_walk()'. The safe
   variant is not needed, as no skbs are removed from the queue inside
   the loop.
2) Removes the nested while loop - the MSG_PEEK logic can be implemented
   without it: just iterate over the skbs without removing them and copy
   data from each one until the destination buffer is full.
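The resulting control flow boils down to the following. This is a minimal
userspace sketch with invented names ('struct buf', 'peek_copy'), not the
kernel function itself, and it leaves out the rx_lock drop/reacquire around
the copy:

/* Userspace analogue of the reworked peek loop: walk a list of
 * buffers without removing them, copying from each until the
 * destination is full.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct buf {
	const char *data;
	size_t len;
	struct buf *next;
};

/* Copy up to 'len' bytes from the buffer list into 'dst' without
 * consuming anything (MSG_PEEK semantics). Returns bytes copied.
 */
static size_t peek_copy(const struct buf *head, char *dst, size_t len)
{
	size_t total = 0;

	for (const struct buf *b = head; b; b = b->next) {
		size_t bytes = len - total;

		if (bytes > b->len)
			bytes = b->len;

		memcpy(dst + total, b->data, bytes);
		total += bytes;

		if (total == len)
			break;
	}

	return total;
}

int main(void)
{
	struct buf b2 = { "world", 5, NULL };
	struct buf b1 = { "hello ", 6, &b2 };
	char dst[8];
	size_t n;

	n = peek_copy(&b1, dst, sizeof(dst));
	printf("peeked %zu bytes: %.*s\n", n, (int)n, dst);
	return 0;
}

Because nothing is dequeued while peeking, a plain forward walk is enough,
which is why 'skb_queue_walk_safe()' and the per-skb 'off' offset are no
longer needed.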

Signed-off-by: Arseniy Krasnov <AVKrasnov@sberdevices.ru>
---
 net/vmw_vsock/virtio_transport_common.c | 41 ++++++++++++-------------
 1 file changed, 19 insertions(+), 22 deletions(-)
  

Comments

Stefano Garzarella June 26, 2023, 4:23 p.m. UTC | #1
On Sun, Jun 18, 2023 at 09:24:48AM +0300, Arseniy Krasnov wrote:
>This reworks the current implementation of the MSG_PEEK logic:
>1) Replaces 'skb_queue_walk_safe()' with 'skb_queue_walk()'. The safe
>   variant is not needed, as no skbs are removed from the queue inside
>   the loop.
>2) Removes the nested while loop - the MSG_PEEK logic can be implemented
>   without it: just iterate over the skbs without removing them and copy
>   data from each one until the destination buffer is full.
>
>Signed-off-by: Arseniy Krasnov <AVKrasnov@sberdevices.ru>
>---
> net/vmw_vsock/virtio_transport_common.c | 41 ++++++++++++-------------
> 1 file changed, 19 insertions(+), 22 deletions(-)

Great clean up!

LGTM, but @Bobby can you also take a look?

Thanks,
Stefano

>
>diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
>index b769fc258931..2ee40574c339 100644
>--- a/net/vmw_vsock/virtio_transport_common.c
>+++ b/net/vmw_vsock/virtio_transport_common.c
>@@ -348,37 +348,34 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
> 				size_t len)
> {
> 	struct virtio_vsock_sock *vvs = vsk->trans;
>-	size_t bytes, total = 0, off;
>-	struct sk_buff *skb, *tmp;
>-	int err = -EFAULT;
>+	struct sk_buff *skb;
>+	size_t total = 0;
>+	int err;
>
> 	spin_lock_bh(&vvs->rx_lock);
>
>-	skb_queue_walk_safe(&vvs->rx_queue, skb,  tmp) {
>-		off = 0;
>+	skb_queue_walk(&vvs->rx_queue, skb) {
>+		size_t bytes;
>
>-		if (total == len)
>-			break;
>+		bytes = len - total;
>+		if (bytes > skb->len)
>+			bytes = skb->len;
>
>-		while (total < len && off < skb->len) {
>-			bytes = len - total;
>-			if (bytes > skb->len - off)
>-				bytes = skb->len - off;
>+		spin_unlock_bh(&vvs->rx_lock);
>
>-			/* sk_lock is held by caller so no one else can dequeue.
>-			 * Unlock rx_lock since memcpy_to_msg() may sleep.
>-			 */
>-			spin_unlock_bh(&vvs->rx_lock);
>+		/* sk_lock is held by caller so no one else can dequeue.
>+		 * Unlock rx_lock since memcpy_to_msg() may sleep.
>+		 */
>+		err = memcpy_to_msg(msg, skb->data, bytes);
>+		if (err)
>+			goto out;
>
>-			err = memcpy_to_msg(msg, skb->data + off, bytes);
>-			if (err)
>-				goto out;
>+		total += bytes;
>
>-			spin_lock_bh(&vvs->rx_lock);
>+		spin_lock_bh(&vvs->rx_lock);
>
>-			total += bytes;
>-			off += bytes;
>-		}
>+		if (total == len)
>+			break;
> 	}
>
> 	spin_unlock_bh(&vvs->rx_lock);
>-- 
>2.25.1
>
  
Bobby Eshleman June 27, 2023, 1:32 a.m. UTC | #2
On Sun, Jun 18, 2023 at 09:24:48AM +0300, Arseniy Krasnov wrote:
> This reworks the current implementation of the MSG_PEEK logic:
> 1) Replaces 'skb_queue_walk_safe()' with 'skb_queue_walk()'. The safe
>    variant is not needed, as no skbs are removed from the queue inside
>    the loop.
> 2) Removes the nested while loop - the MSG_PEEK logic can be implemented
>    without it: just iterate over the skbs without removing them and copy
>    data from each one until the destination buffer is full.
> 
> Signed-off-by: Arseniy Krasnov <AVKrasnov@sberdevices.ru>
> ---
>  net/vmw_vsock/virtio_transport_common.c | 41 ++++++++++++-------------
>  1 file changed, 19 insertions(+), 22 deletions(-)
> 
> diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
> index b769fc258931..2ee40574c339 100644
> --- a/net/vmw_vsock/virtio_transport_common.c
> +++ b/net/vmw_vsock/virtio_transport_common.c
> @@ -348,37 +348,34 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
>  				size_t len)
>  {
>  	struct virtio_vsock_sock *vvs = vsk->trans;
> -	size_t bytes, total = 0, off;
> -	struct sk_buff *skb, *tmp;
> -	int err = -EFAULT;
> +	struct sk_buff *skb;
> +	size_t total = 0;
> +	int err;
>  
>  	spin_lock_bh(&vvs->rx_lock);
>  
> -	skb_queue_walk_safe(&vvs->rx_queue, skb,  tmp) {
> -		off = 0;
> +	skb_queue_walk(&vvs->rx_queue, skb) {
> +		size_t bytes;
>  
> -		if (total == len)
> -			break;
> +		bytes = len - total;
> +		if (bytes > skb->len)
> +			bytes = skb->len;
>  
> -		while (total < len && off < skb->len) {
> -			bytes = len - total;
> -			if (bytes > skb->len - off)
> -				bytes = skb->len - off;
> +		spin_unlock_bh(&vvs->rx_lock);
>  
> -			/* sk_lock is held by caller so no one else can dequeue.
> -			 * Unlock rx_lock since memcpy_to_msg() may sleep.
> -			 */
> -			spin_unlock_bh(&vvs->rx_lock);
> +		/* sk_lock is held by caller so no one else can dequeue.
> +		 * Unlock rx_lock since memcpy_to_msg() may sleep.
> +		 */
> +		err = memcpy_to_msg(msg, skb->data, bytes);
> +		if (err)
> +			goto out;
>  
> -			err = memcpy_to_msg(msg, skb->data + off, bytes);
> -			if (err)
> -				goto out;
> +		total += bytes;
>  
> -			spin_lock_bh(&vvs->rx_lock);
> +		spin_lock_bh(&vvs->rx_lock);
>  
> -			total += bytes;
> -			off += bytes;
> -		}
> +		if (total == len)
> +			break;
>  	}
>  
>  	spin_unlock_bh(&vvs->rx_lock);
> -- 
> 2.25.1
> 

That cleans up nicely!

LGTM.

Reviewed-by: Bobby Eshleman <bobby.eshleman@bytedance.com>
  

Patch

diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index b769fc258931..2ee40574c339 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -348,37 +348,34 @@  virtio_transport_stream_do_peek(struct vsock_sock *vsk,
 				size_t len)
 {
 	struct virtio_vsock_sock *vvs = vsk->trans;
-	size_t bytes, total = 0, off;
-	struct sk_buff *skb, *tmp;
-	int err = -EFAULT;
+	struct sk_buff *skb;
+	size_t total = 0;
+	int err;
 
 	spin_lock_bh(&vvs->rx_lock);
 
-	skb_queue_walk_safe(&vvs->rx_queue, skb,  tmp) {
-		off = 0;
+	skb_queue_walk(&vvs->rx_queue, skb) {
+		size_t bytes;
 
-		if (total == len)
-			break;
+		bytes = len - total;
+		if (bytes > skb->len)
+			bytes = skb->len;
 
-		while (total < len && off < skb->len) {
-			bytes = len - total;
-			if (bytes > skb->len - off)
-				bytes = skb->len - off;
+		spin_unlock_bh(&vvs->rx_lock);
 
-			/* sk_lock is held by caller so no one else can dequeue.
-			 * Unlock rx_lock since memcpy_to_msg() may sleep.
-			 */
-			spin_unlock_bh(&vvs->rx_lock);
+		/* sk_lock is held by caller so no one else can dequeue.
+		 * Unlock rx_lock since memcpy_to_msg() may sleep.
+		 */
+		err = memcpy_to_msg(msg, skb->data, bytes);
+		if (err)
+			goto out;
 
-			err = memcpy_to_msg(msg, skb->data + off, bytes);
-			if (err)
-				goto out;
+		total += bytes;
 
-			spin_lock_bh(&vvs->rx_lock);
+		spin_lock_bh(&vvs->rx_lock);
 
-			total += bytes;
-			off += bytes;
-		}
+		if (total == len)
+			break;
 	}
 
 	spin_unlock_bh(&vvs->rx_lock);