@@ -129,7 +129,6 @@ struct bnxt_re_dev {
unsigned int version, major, minor;
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
- struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
int num_msix;
int id;
@@ -262,7 +262,7 @@ static void bnxt_re_stop_irq(void *handle)
static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
- struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
+ struct bnxt_msix_entry *msix_ent = rdev->en_dev->msix_entries;
struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
struct bnxt_qplib_nq *nq;
int indx, rc;
@@ -281,7 +281,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
* in device structure.
*/
for (indx = 0; indx < rdev->num_msix; indx++)
- rdev->msix_entries[indx].vector = ent[indx].vector;
+ rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;
bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
false);
@@ -315,32 +315,6 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
return rc;
}
-static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
-{
- int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
- struct bnxt_en_dev *en_dev;
-
- en_dev = rdev->en_dev;
-
- num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
-
- num_msix_got = bnxt_req_msix_vecs(en_dev,
- rdev->msix_entries,
- num_msix_want);
- if (num_msix_got < BNXT_RE_MIN_MSIX) {
- rc = -EINVAL;
- goto done;
- }
- if (num_msix_got != num_msix_want) {
- ibdev_warn(&rdev->ibdev,
- "Requested %d MSI-X vectors, got %d\n",
- num_msix_want, num_msix_got);
- }
- rdev->num_msix = num_msix_got;
-done:
- return rc;
-}
-
static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
u16 opcd, u16 crid, u16 trid)
{
@@ -785,7 +759,7 @@ static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
BNXT_RE_GEN_P5_PF_NQ_DB) :
- rdev->msix_entries[indx].db_offset;
+ rdev->en_dev->msix_entries[indx].db_offset;
}
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
@@ -810,7 +784,7 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
for (i = 1; i < rdev->num_msix ; i++) {
db_offt = bnxt_re_get_nqdb_offset(rdev, i);
rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
- i - 1, rdev->msix_entries[i].vector,
+ i - 1, rdev->en_dev->msix_entries[i].vector,
db_offt, &bnxt_re_cqn_handler,
&bnxt_re_srqn_handler);
if (rc) {
@@ -897,7 +871,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
rattr.type = type;
rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
- rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
+ rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx;
rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
if (rc) {
ibdev_err(&rdev->ibdev,
@@ -1217,7 +1191,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
}
if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags))
- bnxt_free_msix_vecs(rdev->en_dev);
+ rdev->num_msix = 0;
bnxt_re_destroy_chip_ctx(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
@@ -1262,13 +1236,15 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
/* Check whether VF or PF */
bnxt_re_get_sriov_func_type(rdev);
- rc = bnxt_re_request_msix(rdev);
- if (rc) {
+ if (!rdev->en_dev->ulp_tbl->msix_requested) {
ibdev_err(&rdev->ibdev,
"Failed to get MSI-X vectors: %#x\n", rc);
rc = -EINVAL;
goto fail;
}
+ ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
+ rdev->en_dev->ulp_tbl->msix_requested);
+ rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);
bnxt_re_query_hwrm_intf_version(rdev);
@@ -1292,14 +1268,14 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
rattr.type = type;
rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
- rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+ rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
goto free_rcfw;
}
db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
- vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
+ vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
vid, db_offt, rdev->is_virtfn,
&bnxt_re_aeq_handler);
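
On the RoCE side, the net effect of the hunks above is that bnxt_re no longer owns an MSI-X table or negotiates vectors at init time; it simply consumes what bnxt_en has already reserved and published through bnxt_en_dev. A condensed sketch of that consumption pattern follows (illustrative only; bnxt_re_pick_vectors() is a made-up helper name and error handling is trimmed):

/* Illustrative sketch, not upstream code: how bnxt_re now discovers its
 * vectors after this patch. Every field referenced here appears in the
 * hunks above; only the helper name is hypothetical.
 */
static int bnxt_re_pick_vectors(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;

	/* bnxt_en fixed the vector count when the aux device was created. */
	if (!en_dev->ulp_tbl->msix_requested)
		return -EINVAL;
	rdev->num_msix = en_dev->ulp_tbl->msix_requested;

	/* Per-vector IRQ, doorbell offset and ring index now come from the
	 * shared table, e.g. for the AEQ:
	 *   en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector
	 *   en_dev->msix_entries[BNXT_RE_AEQ_IDX].db_offset
	 *   en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx
	 */
	return 0;
}
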
@@ -42,17 +42,18 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
bp->cp_nr_rings == max_stat_ctxs)
return -ENOMEM;
- ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
+ ulp = edev->ulp_tbl;
if (!ulp)
return -ENOMEM;
- edev->ulp_tbl = ulp;
ulp->handle = handle;
rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_hwrm_vnic_cfg(bp, 0);
+ bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
+ edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
return 0;
}
EXPORT_SYMBOL(bnxt_register_dev);
@@ -66,7 +67,7 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
ulp = edev->ulp_tbl;
if (ulp->msix_requested)
- bnxt_free_msix_vecs(edev);
+ edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
if (ulp->max_async_event_id)
bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
@@ -79,17 +80,19 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
msleep(100);
i++;
}
- kfree(ulp);
- edev->ulp_tbl = NULL;
return;
}
EXPORT_SYMBOL(bnxt_unregister_dev);
-static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
+void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
int num_msix, idx, i;
+ if (!edev->ulp_tbl->msix_requested) {
+ netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
+ return;
+ }
num_msix = edev->ulp_tbl->msix_requested;
idx = edev->ulp_tbl->msix_base;
for (i = 0; i < num_msix; i++) {
@@ -105,100 +108,13 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
}
}
-int bnxt_req_msix_vecs(struct bnxt_en_dev *edev,
- struct bnxt_msix_entry *ent,
- int num_msix)
-{
- struct net_device *dev = edev->net;
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_hw_resc *hw_resc;
- int max_idx, max_cp_rings;
- int avail_msix, idx;
- int total_vecs;
- int rc = 0;
-
- if (!(bp->flags & BNXT_FLAG_USING_MSIX))
- return -ENODEV;
-
- if (edev->ulp_tbl->msix_requested)
- return -EAGAIN;
-
- max_cp_rings = bnxt_get_max_func_cp_rings(bp);
- avail_msix = bnxt_get_avail_msix(bp, num_msix);
- if (!avail_msix)
- return -ENOMEM;
- if (avail_msix > num_msix)
- avail_msix = num_msix;
-
- if (BNXT_NEW_RM(bp)) {
- idx = bp->cp_nr_rings;
- } else {
- max_idx = min_t(int, bp->total_irqs, max_cp_rings);
- idx = max_idx - avail_msix;
- }
-
- edev->ulp_tbl->msix_base = idx;
- edev->ulp_tbl->msix_requested = avail_msix;
- hw_resc = &bp->hw_resc;
- total_vecs = idx + avail_msix;
- if (bp->total_irqs < total_vecs ||
- (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
- if (netif_running(dev)) {
- rtnl_lock();
- bnxt_close_nic(bp, true, false);
- rc = bnxt_open_nic(bp, true, false);
- rtnl_unlock();
- } else {
- rc = bnxt_reserve_rings(bp, true);
- }
- }
- if (rc) {
- edev->ulp_tbl->msix_requested = 0;
- return -EAGAIN;
- }
-
- if (BNXT_NEW_RM(bp)) {
- int resv_msix;
-
- resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
- avail_msix = min_t(int, resv_msix, avail_msix);
- edev->ulp_tbl->msix_requested = avail_msix;
- }
- bnxt_fill_msix_vecs(bp, ent);
- edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
- return avail_msix;
-}
-EXPORT_SYMBOL(bnxt_req_msix_vecs);
-
-void bnxt_free_msix_vecs(struct bnxt_en_dev *edev)
-{
- struct net_device *dev = edev->net;
- struct bnxt *bp = netdev_priv(dev);
-
- if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
- return;
-
- edev->ulp_tbl->msix_requested = 0;
- edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
- if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
- rtnl_lock();
- bnxt_close_nic(bp, true, false);
- bnxt_open_nic(bp, true, false);
- rtnl_unlock();
- }
-
- return;
-}
-EXPORT_SYMBOL(bnxt_free_msix_vecs);
-
int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev)) {
- struct bnxt_en_dev *edev = bp->edev;
+ u32 roce_msix = BNXT_VF(bp) ?
+ BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
- return edev->ulp_tbl->msix_requested;
- }
- return 0;
+ return ((bp->flags & BNXT_FLAG_ROCE_CAP) && bp->aux_dev ?
+ min_t(u32, roce_msix, num_online_cpus()) : 0);
}
int bnxt_get_ulp_msix_base(struct bnxt *bp)
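
With runtime negotiation gone, bnxt_get_ulp_msix_num() reduces to a static cap: min(BNXT_MAX_ROCE_MSIX or BNXT_MAX_VF_ROCE_MSIX, num_online_cpus()) when the device is RoCE-capable and has an aux device, otherwise 0. A small self-contained illustration of how that works out (userspace mock with made-up CPU counts; only the two limits come from the patch):

#include <stdio.h>

/* Mock of the new static cap; 9 and 2 mirror BNXT_MAX_ROCE_MSIX and
 * BNXT_MAX_VF_ROCE_MSIX from the bnxt_ulp.h hunk below.
 */
static unsigned int roce_msix_cap(int is_vf, int roce_capable,
				  unsigned int online_cpus)
{
	unsigned int max = is_vf ? 2 : 9;

	if (!roce_capable)
		return 0;
	return online_cpus < max ? online_cpus : max;
}

int main(void)
{
	printf("PF, RoCE, 16 CPUs -> %u\n", roce_msix_cap(0, 1, 16)); /* 9 */
	printf("PF, RoCE,  4 CPUs -> %u\n", roce_msix_cap(0, 1, 4));  /* 4 */
	printf("VF, RoCE,  8 CPUs -> %u\n", roce_msix_cap(1, 1, 8));  /* 2 */
	printf("PF, no RoCE cap   -> %u\n", roce_msix_cap(0, 0, 16)); /* 0 */
	return 0;
}
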
@@ -457,6 +373,8 @@ void bnxt_aux_dev_release(struct device *dev)
container_of(dev, struct bnxt_aux_dev, aux_dev.dev);
struct bnxt *bp = netdev_priv(bnxt_adev->edev->net);
+ kfree(bnxt_adev->edev->ulp_tbl);
+ bnxt_adev->edev->ulp_tbl = NULL;
kfree(bnxt_adev->edev);
bnxt_adev->edev = NULL;
bp->edev = NULL;
@@ -480,6 +398,8 @@ static inline void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->hw_ring_stats_size = bp->hw_ring_stats_size;
edev->pf_port_id = bp->pf.port_id;
edev->en_state = bp->state;
+
+ edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
}
int bnxt_rdma_aux_device_add(struct bnxt *bp)
@@ -487,6 +407,7 @@ int bnxt_rdma_aux_device_add(struct bnxt *bp)
struct bnxt_aux_dev *bnxt_adev = bp->aux_dev;
struct bnxt_en_dev *edev = bnxt_adev->edev;
struct auxiliary_device *aux_dev;
+ struct bnxt_ulp *ulp;
int ret;
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
@@ -495,6 +416,14 @@ int bnxt_rdma_aux_device_add(struct bnxt *bp)
goto cleanup_edev_failure;
}
+ ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
+ if (!ulp) {
+ ret = -ENOMEM;
+ goto cleanup_ulp_failure;
+ }
+
+ edev->ulp_tbl = ulp;
+
aux_dev = &bnxt_adev->aux_dev;
aux_dev->id = bnxt_adev->id;
aux_dev->name = "rdma";
@@ -518,6 +447,9 @@ int bnxt_rdma_aux_device_add(struct bnxt *bp)
cleanup_add_failure:
auxiliary_device_uninit(aux_dev);
cleanup_init_failure:
+ kfree(ulp);
+ bp->edev->ulp_tbl = NULL;
+cleanup_ulp_failure:
kfree(edev);
bp->edev = NULL;
cleanup_edev_failure:
@@ -15,6 +15,8 @@
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
+#define BNXT_MAX_ROCE_MSIX 9
+#define BNXT_MAX_VF_ROCE_MSIX 2
struct hwrm_async_event_cmpl;
struct bnxt_aux_dev;
@@ -52,6 +54,7 @@ struct bnxt_ulp {
struct bnxt_en_dev {
struct net_device *net;
struct pci_dev *pdev;
+ struct bnxt_msix_entry msix_entries[BNXT_MAX_ROCE_MSIX];
u32 flags;
#define BNXT_EN_FLAG_ROCEV1_CAP 0x1
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
@@ -104,10 +107,8 @@ void bnxt_aux_dev_free(struct bnxt *bp);
int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
void *handle);
void bnxt_unregister_dev(struct bnxt_en_dev *edev);
-int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, struct bnxt_msix_entry *ent,
- int num_msix);
-void bnxt_free_msix_vecs(struct bnxt_en_dev *edev);
int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
int bnxt_register_async_events(struct bnxt_en_dev *edev,
unsigned long *events_bmap, u16 max_id);
+void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent);
#endif
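
Taken together, the ownership model becomes: the ULP table and the MSI-X entry array live in bnxt_en_dev for the lifetime of the auxiliary device, the vector budget is fixed when the aux device is created, bnxt_register_dev() merely publishes the entries, and bnxt_unregister_dev() only clears the flag since there is nothing left to free. A condensed call-flow summary, written as a comment sketch with the ordering inferred from the hunks above:

/*
 * bnxt_rdma_aux_device_add()
 *     edev = kzalloc(); ulp = kzalloc(); edev->ulp_tbl = ulp;
 *     bnxt_set_edev_info()
 *         ulp->msix_requested = bnxt_get_ulp_msix_num(bp);    // static cap
 *
 * bnxt_register_dev()                                         // RoCE probe
 *     bnxt_fill_msix_vecs(bp, edev->msix_entries);            // publish table
 *     edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
 *
 * bnxt_unregister_dev()                                       // RoCE remove
 *     edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;            // nothing freed
 *
 * bnxt_aux_dev_release()
 *     kfree(edev->ulp_tbl); kfree(edev);
 */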