[5/5] 9p: refactor 'post_recv()'

Message ID 20221121080049.3850133-6-yebin@huaweicloud.com
State New
Series Fix error handle in 'rdma_request()'

Commit Message

Ye Bin Nov. 21, 2022, 8 a.m. UTC
  From: Ye Bin <yebin10@huawei.com>

Refactor 'post_recv()': move the receive resource setup (reply context
allocation and RQ credit accounting) from 'rdma_request()' into
'post_recv()', so that 'post_recv()' acquires, and on failure releases,
everything it needs itself.

Signed-off-by: Ye Bin <yebin10@huawei.com>
---
 net/9p/trans_rdma.c | 77 +++++++++++++++++++++++----------------------
 1 file changed, 39 insertions(+), 38 deletions(-)
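
[Editor's note, not part of the commit message: the control flow the refactor
produces is the kernel's usual goto-unwind ladder — acquire in order (reply
context, RQ credit, DMA mapping), release in reverse order from the deepest
failure point. The userspace program below is a minimal sketch of that
ordering only; every function in it is an illustrative stub, not the 9p/RDMA
API.]

/* Model of the unwind ladder in the patched post_recv(). */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int  take_credit(void)  { return 0; }    /* stands in for down_interruptible(&rq_sem) */
static void drop_credit(void)  { printf("credit returned\n"); }  /* up(&rq_sem) */
static int  map_buffer(void)   { return 0; }    /* ib_dma_map_single() + mapping check */
static void unmap_buffer(void) { printf("buffer unmapped\n"); }  /* ib_dma_unmap_single() */
static int  post_wr(void)      { return -EIO; } /* ib_post_recv(); fails for the demo */

static int post_recv_model(void)
{
	void *c;	/* stands in for the p9_rdma_context */
	int err;

	c = malloc(64);
	if (!c) {
		err = -ENOMEM;
		goto error;		/* free(NULL) is a no-op, as is kfree(NULL) */
	}
	if (take_credit()) {
		err = -EINTR;
		goto error;		/* undo step 1 only */
	}
	if (map_buffer()) {
		err = -EIO;
		goto sem_error;		/* undo steps 2 and 1 */
	}
	err = post_wr();
	if (err)
		goto mapping_error;	/* undo steps 3, 2 and 1 */
	return 0;

mapping_error:
	unmap_buffer();
sem_error:
	drop_credit();
error:
	free(c);
	return err;
}

int main(void)
{
	printf("post_recv_model() = %d\n", post_recv_model());
	return 0;
}

The point of the ordering is that each label releases exactly the resources
acquired before its failure point, which is what lets the patch drop the
hardcoded 'err = -EIO' initializer and the caller-side kfree() of the reply
context.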
  

Patch

diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index bb917389adc9..78452c289f35 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -380,19 +380,40 @@  static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
 	kfree(rdma);
 }
 
-static int
-post_recv(struct p9_client *client, struct p9_rdma_context *c)
+static int post_recv(struct p9_client *client, struct p9_req_t *req)
 {
 	struct p9_trans_rdma *rdma = client->trans;
+	struct p9_rdma_context *c = NULL;
 	struct ib_recv_wr wr;
 	struct ib_sge sge;
-	int err = -EIO;
+	int err;
+
+	c = kmalloc(sizeof *c, GFP_NOFS);
+	if (!c) {
+		err = -ENOMEM;
+		goto error;
+	}
+	c->rc.sdata = req->rc.sdata;
+
+	/*
+	 * Post a receive buffer for this request. We need to ensure
+	 * there is a reply buffer available for every outstanding
+	 * request. A flushed request can result in no reply for an
+	 * outstanding request, so we must keep a count to avoid
+	 * overflowing the RQ.
+	 */
+	if (down_interruptible(&rdma->rq_sem)) {
+		err = -EINTR;
+		goto error;
+	}
 
 	c->busa = ib_dma_map_single(rdma->cm_id->device,
 				    c->rc.sdata, client->msize,
 				    DMA_FROM_DEVICE);
-	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
-		goto error;
+	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
+		err = -EIO;
+		goto sem_error;
+	}
 
 	c->cqe.done = recv_done;
 
@@ -405,15 +426,18 @@  post_recv(struct p9_client *client, struct p9_rdma_context *c)
 	wr.sg_list = &sge;
 	wr.num_sge = 1;
 	err = ib_post_recv(rdma->qp, &wr, NULL);
-	if (err) {
-		ib_dma_unmap_single(rdma->cm_id->device, c->busa,
-				    client->msize, DMA_FROM_DEVICE);
-		goto error;
-	}
+	if (err)
+		goto mapping_error;
+
 	return 0;
- error:
+
+mapping_error:
+	ib_dma_unmap_single(rdma->cm_id->device, c->busa,
+			    client->msize, DMA_FROM_DEVICE);
+sem_error:
 	up(&rdma->rq_sem);
-	p9_debug(P9_DEBUG_ERROR, "EIO\n");
+error:
+	kfree(c);
 	return err;
 }
 
@@ -481,9 +505,8 @@  static int post_send(struct p9_client *client, struct p9_req_t *req)
 static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 {
 	struct p9_trans_rdma *rdma = client->trans;
-	int err = 0;
 	unsigned long flags;
-	struct p9_rdma_context *rpl_context = NULL;
+	int err;
 
 	/* When an error occurs between posting the recv and the send,
 	 * there will be a receive context posted without a pending request.
@@ -505,27 +528,7 @@  static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 		}
 	}
 
-	/* Allocate an fcall for the reply */
-	rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
-	if (!rpl_context) {
-		err = -ENOMEM;
-		goto recv_error;
-	}
-	rpl_context->rc.sdata = req->rc.sdata;
-
-	/*
-	 * Post a receive buffer for this request. We need to ensure
-	 * there is a reply buffer available for every outstanding
-	 * request. A flushed request can result in no reply for an
-	 * outstanding request, so we must keep a count to avoid
-	 * overflowing the RQ.
-	 */
-	if (down_interruptible(&rdma->rq_sem)) {
-		err = -EINTR;
-		goto recv_error;
-	}
-
-	err = post_recv(client, rpl_context);
+	err = post_recv(client, req);
 	if (err) {
 		p9_debug(P9_DEBUG_ERROR, "POST RECV failed: %d\n", err);
 		goto recv_error;
@@ -547,9 +550,7 @@  static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 	}
 	return err;
 
- /* Handle errors that happened during or while preparing post_recv(): */
- recv_error:
-	kfree(rpl_context);
+recv_error:
 	spin_lock_irqsave(&rdma->req_lock, flags);
 	if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
 		rdma->state = P9_RDMA_CLOSING;
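
[Editor's note: the comment block the patch moves into 'post_recv()' states
the key invariant — rq_sem is a credit counter sized to the receive queue
depth, a recv may only be posted after taking a credit, and every error path
in the patch hands the credit back via up(&rdma->rq_sem). Below is a small
userspace model of that accounting; POSIX semaphores are only a stand-in for
the kernel's struct semaphore, and RQ_DEPTH is an assumed value for the demo.]

/* Model of the rq_sem credit scheme: the counter starts at the RQ depth,
 * each posted recv consumes one credit, and a credit is returned when
 * posting fails (or, in the real driver, when a recv completes). */
#include <semaphore.h>
#include <stdio.h>

#define RQ_DEPTH 2	/* assumed queue depth, for the demo only */

int main(void)
{
	sem_t rq_sem;

	sem_init(&rq_sem, 0, RQ_DEPTH);

	/* Three posts against a depth-2 queue: the third finds no credit. */
	for (int i = 1; i <= 3; i++) {
		if (sem_trywait(&rq_sem) == 0)
			printf("recv %d posted\n", i);
		else
			printf("recv %d would overflow the RQ\n", i);
	}

	sem_post(&rq_sem);	/* an error path returns its credit */
	sem_destroy(&rq_sem);
	return 0;
}

This is why the DMA-mapping and post failures in the patch jump to the
sem_error/mapping_error labels above the plain error label: at those points
the credit has already been consumed and must be returned before the context
is freed.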