Hi Junxian,

kernel test robot noticed the following build warnings:

[auto build test WARNING on rdma/for-next]
[also build test WARNING on linus/master v6.6-rc2 next-20230920]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Junxian-Huang/RDMA-hns-Support-SRQ-record-doorbell/20230920-113419
base:   https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git for-next
patch link:    https://lore.kernel.org/r/20230920033005.1557-1-huangjunxian6%40hisilicon.com
patch subject: [PATCH for-next] RDMA/hns: Support SRQ record doorbell
config: sparc-allyesconfig (https://download.01.org/0day-ci/archive/20230920/202309201334.ecTzlCD0-lkp@intel.com/config)
compiler: sparc64-linux-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20230920/202309201334.ecTzlCD0-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202309201334.ecTzlCD0-lkp@intel.com/

All warnings (new ones prefixed by >>):
drivers/infiniband/hw/hns/hns_roce_hw_v2.c: In function 'hns_roce_v2_post_srq_recv':
>> drivers/infiniband/hw/hns/hns_roce_hw_v2.c:960:30: warning: unused variable 'hr_dev' [-Wunused-variable]
960 | struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
| ^~~~~~
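
The local is unused because the patch under test (quoted in full below) moves
the hns_roce_write64() doorbell write out of hns_roce_v2_post_srq_recv() and
into update_srq_db(), which now derives hr_dev from srq->ibsrq.device itself.
A minimal sketch of one possible fix, assuming a respin adds no other user of
hr_dev (hunk offsets approximate, untested):

--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -957,7 +957,6 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 				     const struct ib_recv_wr **bad_wr)
 {
-	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
 	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
 	unsigned long flags;
 	int ret = 0;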
vim +/hr_dev +960 drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2b035e7312b508 Yixing Liu 2021-06-21 955
ffb1308b88b602 Yixian Liu 2020-04-28 956 static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
ffb1308b88b602 Yixian Liu 2020-04-28 957 const struct ib_recv_wr *wr,
ffb1308b88b602 Yixian Liu 2020-04-28 958 const struct ib_recv_wr **bad_wr)
ffb1308b88b602 Yixian Liu 2020-04-28 959 {
ffb1308b88b602 Yixian Liu 2020-04-28 @960 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
ffb1308b88b602 Yixian Liu 2020-04-28 961 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
ffb1308b88b602 Yixian Liu 2020-04-28 962 unsigned long flags;
ffb1308b88b602 Yixian Liu 2020-04-28 963 int ret = 0;
2e07a3d945851f Wenpeng Liang 2021-01-30 964 u32 max_sge;
2e07a3d945851f Wenpeng Liang 2021-01-30 965 u32 wqe_idx;
ffb1308b88b602 Yixian Liu 2020-04-28 966 void *wqe;
2e07a3d945851f Wenpeng Liang 2021-01-30 967 u32 nreq;
ffb1308b88b602 Yixian Liu 2020-04-28 968
ffb1308b88b602 Yixian Liu 2020-04-28 969 spin_lock_irqsave(&srq->lock, flags);
ffb1308b88b602 Yixian Liu 2020-04-28 970
9dd052474a2645 Lang Cheng 2021-01-30 971 max_sge = srq->max_gs - srq->rsv_sge;
ffb1308b88b602 Yixian Liu 2020-04-28 972 for (nreq = 0; wr; ++nreq, wr = wr->next) {
2e07a3d945851f Wenpeng Liang 2021-01-30 973 ret = check_post_srq_valid(srq, max_sge, wr);
2e07a3d945851f Wenpeng Liang 2021-01-30 974 if (ret) {
ffb1308b88b602 Yixian Liu 2020-04-28 975 *bad_wr = wr;
ffb1308b88b602 Yixian Liu 2020-04-28 976 break;
ffb1308b88b602 Yixian Liu 2020-04-28 977 }
ffb1308b88b602 Yixian Liu 2020-04-28 978
6b981e2bd9251f Xi Wang 2021-01-30 979 ret = get_srq_wqe_idx(srq, &wqe_idx);
6b981e2bd9251f Xi Wang 2021-01-30 980 if (unlikely(ret)) {
ffb1308b88b602 Yixian Liu 2020-04-28 981 *bad_wr = wr;
ffb1308b88b602 Yixian Liu 2020-04-28 982 break;
ffb1308b88b602 Yixian Liu 2020-04-28 983 }
ffb1308b88b602 Yixian Liu 2020-04-28 984
6b981e2bd9251f Xi Wang 2021-01-30 985 wqe = get_srq_wqe_buf(srq, wqe_idx);
6b981e2bd9251f Xi Wang 2021-01-30 986 fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
2e07a3d945851f Wenpeng Liang 2021-01-30 987 fill_wqe_idx(srq, wqe_idx);
ffb1308b88b602 Yixian Liu 2020-04-28 988 srq->wrid[wqe_idx] = wr->wr_id;
ffb1308b88b602 Yixian Liu 2020-04-28 989 }
ffb1308b88b602 Yixian Liu 2020-04-28 990
ffb1308b88b602 Yixian Liu 2020-04-28 991 if (likely(nreq)) {
14d4b5285cbe41 Yangyang Li 2023-09-20 992 if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)
14d4b5285cbe41 Yangyang Li 2023-09-20 993 *srq->rdb.db_record = srq->idx_que.head &
14d4b5285cbe41 Yangyang Li 2023-09-20 994 V2_DB_PRODUCER_IDX_M;
14d4b5285cbe41 Yangyang Li 2023-09-20 995 else
14d4b5285cbe41 Yangyang Li 2023-09-20 996 update_srq_db(srq);
ffb1308b88b602 Yixian Liu 2020-04-28 997 }
ffb1308b88b602 Yixian Liu 2020-04-28 998
ffb1308b88b602 Yixian Liu 2020-04-28 999 spin_unlock_irqrestore(&srq->lock, flags);
ffb1308b88b602 Yixian Liu 2020-04-28 1000
ffb1308b88b602 Yixian Liu 2020-04-28 1001 return ret;
ffb1308b88b602 Yixian Liu 2020-04-28 1002 }
ffb1308b88b602 Yixian Liu 2020-04-28 1003
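
For reference, the patch under test: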

--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -146,6 +146,7 @@ enum {
HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
HNS_ROCE_CAP_FLAG_STASH = BIT(17),
HNS_ROCE_CAP_FLAG_CQE_INLINE = BIT(19),
+ HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB = BIT(22),
};
#define HNS_ROCE_DB_TYPE_COUNT 2
@@ -453,6 +454,8 @@ struct hns_roce_srq {
spinlock_t lock;
struct mutex mutex;
void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
+ struct hns_roce_db rdb;
+ u32 cap_flags;
};
struct hns_roce_uar_table {

--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -941,11 +941,16 @@ static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
idx_que->head++;
}
-static void update_srq_db(struct hns_roce_v2_db *db, struct hns_roce_srq *srq)
+static void update_srq_db(struct hns_roce_srq *srq)
{
- hr_reg_write(db, DB_TAG, srq->srqn);
- hr_reg_write(db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
- hr_reg_write(db, DB_PI, srq->idx_que.head);
+ struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+ struct hns_roce_v2_db db;
+
+ hr_reg_write(&db, DB_TAG, srq->srqn);
+ hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
+ hr_reg_write(&db, DB_PI, srq->idx_que.head);
+
+ hns_roce_write64(hr_dev, (__le32 *)&db, srq->db_reg);
}
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
@@ -954,7 +959,6 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- struct hns_roce_v2_db srq_db;
unsigned long flags;
int ret = 0;
u32 max_sge;
@@ -985,9 +989,11 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
}
if (likely(nreq)) {
- update_srq_db(&srq_db, srq);
-
- hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
+ if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)
+ *srq->rdb.db_record = srq->idx_que.head &
+ V2_DB_PRODUCER_IDX_M;
+ else
+ update_srq_db(srq);
}
spin_unlock_irqrestore(&srq->lock, flags);
@@ -5606,6 +5612,14 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
+ if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB) {
+ hr_reg_enable(ctx, SRQC_DB_RECORD_EN);
+ hr_reg_write(ctx, SRQC_DB_RECORD_ADDR_L,
+ lower_32_bits(srq->rdb.dma) >> 1);
+ hr_reg_write(ctx, SRQC_DB_RECORD_ADDR_H,
+ upper_32_bits(srq->rdb.dma));
+ }
+
return hns_roce_v2_write_srqc_index_queue(srq, ctx);
}
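
A note on the SRQC programming above: the low word of the doorbell record
address is written shifted right by one bit, presumably because the SRQC
field, like the existing QPC RQ record-doorbell field, holds addr[31:1]
(the record is at least 2-byte aligned); this mirrors how
QPC_RQ_DB_RECORD_ADDR_L is written elsewhere in this file.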

--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -5,6 +5,7 @@
#include <linux/pci.h>
#include <rdma/ib_umem.h>
+#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
@@ -387,6 +388,79 @@ static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
free_srq_idx(hr_dev, srq);
}
+static int get_srq_ucmd(struct hns_roce_srq *srq, struct ib_udata *udata,
+ struct hns_roce_ib_create_srq *ucmd)
+{
+ struct ib_device *ibdev = srq->ibsrq.device;
+ int ret;
+
+ ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
+ if (ret) {
+ ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void free_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata)
+{
+ struct hns_roce_ucontext *uctx;
+
+ if (!(srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB))
+ return;
+
+ srq->cap_flags &= ~HNS_ROCE_SRQ_CAP_RECORD_DB;
+ if (udata) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext,
+ ibucontext);
+ hns_roce_db_unmap_user(uctx, &srq->rdb);
+ } else {
+ hns_roce_free_db(hr_dev, &srq->rdb);
+ }
+}
+
+static int alloc_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_srq_resp *resp)
+{
+ struct hns_roce_ib_create_srq ucmd = {};
+ struct hns_roce_ucontext *uctx;
+ int ret;
+
+ if (udata) {
+ ret = get_srq_ucmd(srq, udata, &ucmd);
+ if (ret)
+ return ret;
+
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB) &&
+ (ucmd.req_cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ ret = hns_roce_db_map_user(uctx, ucmd.db_addr,
+ &srq->rdb);
+ if (ret)
+ return ret;
+
+ srq->cap_flags |= HNS_ROCE_RSP_SRQ_CAP_RECORD_DB;
+ }
+ } else {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB) {
+ ret = hns_roce_alloc_db(hr_dev, &srq->rdb, 1);
+ if (ret)
+ return ret;
+
+ *srq->rdb.db_record = 0;
+ srq->cap_flags |= HNS_ROCE_RSP_SRQ_CAP_RECORD_DB;
+ }
+ srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
+ }
+
+ return 0;
+}
+
int hns_roce_create_srq(struct ib_srq *ib_srq,
struct ib_srq_init_attr *init_attr,
struct ib_udata *udata)
@@ -407,15 +481,20 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
if (ret)
return ret;
- ret = alloc_srqn(hr_dev, srq);
+ ret = alloc_srq_db(hr_dev, srq, udata, &resp);
if (ret)
goto err_srq_buf;
+ ret = alloc_srqn(hr_dev, srq);
+ if (ret)
+ goto err_srq_db;
+
ret = alloc_srqc(hr_dev, srq);
if (ret)
goto err_srqn;
if (udata) {
+ resp.cap_flags = srq->cap_flags;
resp.srqn = srq->srqn;
if (ib_copy_to_udata(udata, &resp,
min(udata->outlen, sizeof(resp)))) {
@@ -424,7 +503,6 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
}
}
- srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
srq->event = hns_roce_ib_srq_event;
refcount_set(&srq->refcount, 1);
init_completion(&srq->free);
@@ -435,6 +513,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
free_srqc(hr_dev, srq);
err_srqn:
free_srqn(hr_dev, srq);
+err_srq_db:
+ free_srq_db(hr_dev, srq, udata);
err_srq_buf:
free_srq_buf(hr_dev, srq);
@@ -448,6 +528,7 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
free_srqc(hr_dev, srq);
free_srqn(hr_dev, srq);
+ free_srq_db(hr_dev, srq, udata);
free_srq_buf(hr_dev, srq);
return 0;
}
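
Note that free_srq_db() is shared by the create error path and
hns_roce_destroy_srq(), and it clears HNS_ROCE_SRQ_CAP_RECORD_DB before
unmapping (user SRQ) or freeing (kernel SRQ) the record page, so a repeated
call is a harmless no-op.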

--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -52,15 +52,25 @@ struct hns_roce_ib_create_cq_resp {
__aligned_u64 cap_flags;
};
+enum hns_roce_srq_cap_flags {
+ HNS_ROCE_SRQ_CAP_RECORD_DB = 1 << 0,
+};
+
+enum hns_roce_srq_cap_flags_resp {
+ HNS_ROCE_RSP_SRQ_CAP_RECORD_DB = 1 << 0,
+};
+
struct hns_roce_ib_create_srq {
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
__aligned_u64 que_addr;
+ __u32 req_cap_flags; /* Use enum hns_roce_srq_cap_flags */
+ __u32 reserved;
};
struct hns_roce_ib_create_srq_resp {
__u32 srqn;
- __u32 reserved;
+	__u32 cap_flags; /* Use enum hns_roce_srq_cap_flags_resp */
};
struct hns_roce_ib_create_qp {
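
For completeness, a hypothetical provider-side sketch of the resulting ABI
handshake (variable names and the surrounding rdma-core plumbing are assumed
here, not taken from this patch): userspace requests the record doorbell via
req_cap_flags and must check cap_flags in the response before relying on it.

	struct hns_roce_ib_create_srq cmd = {};
	struct hns_roce_ib_create_srq_resp resp = {};

	cmd.buf_addr = (uintptr_t)wqe_buf;	/* assumed SRQ WQE buffer */
	cmd.que_addr = (uintptr_t)idx_buf;	/* assumed index-queue buffer */
	cmd.db_addr = (uintptr_t)db_record;	/* assumed 4-byte doorbell record */
	cmd.req_cap_flags = HNS_ROCE_SRQ_CAP_RECORD_DB;

	/* ... pass cmd/resp through the create_srq uverbs path ... */

	if (resp.cap_flags & HNS_ROCE_RSP_SRQ_CAP_RECORD_DB)
		use_rdb = true;		/* post_recv stores the head in *db_record */
	else
		use_rdb = false;	/* post_recv rings the hardware SRQ doorbell */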