On Fri, Apr 07, 2023 at 05:53:43PM +0530, Sai Krishna wrote:
> From: Ratheesh Kannoth <rkannoth@marvell.com>
>
> Skip mbox initialization of disabled PFs. Firmware configures PFs
> and allocates mbox resources etc. Linux should configure only those
> PFs which are enabled by firmware.
>
> Fixes: 9bdc47a6e328 ("octeontx2-af: Mbox communication support btw AF and it's VFs")
> Signed-off-by: Ratheesh Kannoth <rkannoth@marvell.com>
> Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
> Signed-off-by: Sai Krishna <saikrishnag@marvell.com>
...
> @@ -2343,8 +2349,27 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
> int err = -EINVAL, i, dir, dir_up;
> void __iomem *reg_base;
> struct rvu_work *mwork;
> + unsigned long *pf_bmap;
> void **mbox_regions;
> const char *name;
> + u64 cfg;
> +
> + pf_bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);
Hi Sai and Ratheesh,

I am a little confused about the lifecycle of pf_bmap.
It is a local variable of this function.
But it is allocated using devm_kcalloc(), so it will persist for
the life of the device.

Also, I note that rvu_mbox_init() has two call sites.
So is there a situation where a pf_bmap is allocated more than once
for the same rvu->dev instance?

It seems to me it would be more appropriate to allocate pf_bmap using
kcalloc(), taking care to free it before leaving rvu_mbox_init(),
as is already done for mbox_regions.
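For example, something roughly like this (completely untested sketch;
the free_bitmap label is made up here for illustration):

	pf_bmap = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);
	if (!pf_bmap)
		return -ENOMEM;

	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
	if (!mbox_regions) {
		err = -ENOMEM;
		goto free_bitmap;
	}
	...
free_regions:
	kfree(mbox_regions);
free_bitmap:
	kfree(pf_bmap);
	return err;

And likewise kfree(pf_bmap) on the successful return path, as the
bitmap is only needed while rvu_mbox_init() runs.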
> + if (!pf_bmap)
> + return -ENOMEM;
...
Please see inline,
> -----Original Message-----
> From: Simon Horman <simon.horman@corigine.com>
> Sent: Saturday, April 8, 2023 8:26 PM
> To: Sai Krishna Gajula <saikrishnag@marvell.com>
> Cc: davem@davemloft.net; edumazet@google.com; kuba@kernel.org;
> pabeni@redhat.com; netdev@vger.kernel.org; linux-
> kernel@vger.kernel.org; Sunil Kovvuri Goutham <sgoutham@marvell.com>;
> Geethasowjanya Akula <gakula@marvell.com>; richardcochran@gmail.com;
> Linu Cherian <lcherian@marvell.com>; Jerin Jacob Kollanukkaran
> <jerinj@marvell.com>; Hariprasad Kelam <hkelam@marvell.com>;
> Subbaraya Sundeep Bhatta <sbhatta@marvell.com>; Ratheesh Kannoth
> <rkannoth@marvell.com>
> Subject: Re: [net PATCH v2 6/7] octeontx2-af: Skip PFs if not enabled
>
> On Fri, Apr 07, 2023 at 05:53:43PM +0530, Sai Krishna wrote:
> > From: Ratheesh Kannoth <rkannoth@marvell.com>
> >
> > Skip mbox initialization of disabled PFs. Firmware configures PFs and
> > allocates mbox resources etc. Linux should configure only those PFs
> > which are enabled by firmware.
> >
> > Fixes: 9bdc47a6e328 ("octeontx2-af: Mbox communication support btw AF
> > and it's VFs")
> > Signed-off-by: Ratheesh Kannoth <rkannoth@marvell.com>
> > Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
> > Signed-off-by: Sai Krishna <saikrishnag@marvell.com>
>
> ...
>
> > @@ -2343,8 +2349,27 @@ static int rvu_mbox_init(struct rvu *rvu, struct
> mbox_wq_info *mw,
> > int err = -EINVAL, i, dir, dir_up;
> > void __iomem *reg_base;
> > struct rvu_work *mwork;
> > + unsigned long *pf_bmap;
> > void **mbox_regions;
> > const char *name;
> > + u64 cfg;
> > +
> > + pf_bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(num),
> sizeof(long),
> > +GFP_KERNEL);
>
> Hi Sai and Ratheesh,
>
> I am a little confused about the lifecycle of pf_bmap.
> It is a local variable of this function.
> But it is allocated using devm_kcalloc(), so it will persist for the life of the
> device.
>
> Also, I note that rvu_mbox_init() has two call sites.
> So is there a situation where a pf_bmap is allocated more than once for the
> same rvu->dev instance?
>
> It seems to me it would be more appropriate to allocate pf_bmap using
> kcalloc(), taking care to free it before leaving rvu_mbox_init(), as is already
> done for mbox_regions.
>
We will prepare a v3 patch with the suggested changes, using kcalloc().
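Roughly along these lines (untested sketch, the final v3 may differ;
since pf_bmap is used as a bitmap, bitmap_zalloc()/bitmap_free() could
also be an alternative to kcalloc()/kfree()):

	pf_bmap = bitmap_zalloc(num, GFP_KERNEL);
	if (!pf_bmap)
		return -ENOMEM;
	...
	/* and bitmap_free(pf_bmap) on every path out of rvu_mbox_init() */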
> > + if (!pf_bmap)
> > + return -ENOMEM;
>
> ...
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(otx2_mbox_init);
*/
int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
struct pci_dev *pdev, void *reg_base,
- int direction, int ndevs)
+ int direction, int ndevs, unsigned long *pf_bmap)
{
struct otx2_mbox_dev *mdev;
int devid, err;
@@ -169,6 +169,9 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
mbox->hwbase = hwbase[0];
for (devid = 0; devid < ndevs; devid++) {
+ if (!test_bit(devid, pf_bmap))
+ continue;
+
mdev = &mbox->dev[devid];
mdev->mbase = hwbase[devid];
mdev->hwbase = hwbase[devid];
@@ -96,9 +96,10 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox);
int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
struct pci_dev *pdev, void __force *reg_base,
int direction, int ndevs);
+
int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
struct pci_dev *pdev, void __force *reg_base,
- int direction, int ndevs);
+ int direction, int ndevs, unsigned long *bmap);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
@@ -2282,7 +2282,7 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
}
static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
- int num, int type)
+ int num, int type, unsigned long *pf_bmap)
{
struct rvu_hwinfo *hw = rvu->hw;
int region;
@@ -2294,6 +2294,9 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
*/
if (type == TYPE_AFVF) {
for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
if (hw->cap.per_pf_mbox_regs) {
bar4 = rvu_read64(rvu, BLKADDR_RVUM,
RVU_AF_PFX_BAR4_ADDR(0)) +
@@ -2315,6 +2318,9 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
* RVU_AF_PF_BAR4_ADDR register.
*/
for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
if (hw->cap.per_pf_mbox_regs) {
bar4 = rvu_read64(rvu, BLKADDR_RVUM,
RVU_AF_PFX_BAR4_ADDR(region));
@@ -2343,8 +2349,27 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int err = -EINVAL, i, dir, dir_up;
void __iomem *reg_base;
struct rvu_work *mwork;
+ unsigned long *pf_bmap;
void **mbox_regions;
const char *name;
+ u64 cfg;
+
+ pf_bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);
+ if (!pf_bmap)
+ return -ENOMEM;
+
+ /* RVU VFs */
+ if (type == TYPE_AFVF)
+ bitmap_set(pf_bmap, 0, num);
+
+ if (type == TYPE_AFPF) {
+ /* Mark enabled PFs in bitmap */
+ for (i = 0; i < num; i++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
+ if (cfg & BIT_ULL(20))
+ set_bit(i, pf_bmap);
+ }
+ }
mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
if (!mbox_regions)
@@ -2356,7 +2381,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
dir = MBOX_DIR_AFPF;
dir_up = MBOX_DIR_AFPF_UP;
reg_base = rvu->afreg_base;
- err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap);
if (err)
goto free_regions;
break;
@@ -2365,7 +2390,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
dir = MBOX_DIR_PFVF;
dir_up = MBOX_DIR_PFVF_UP;
reg_base = rvu->pfreg_base;
- err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap);
if (err)
goto free_regions;
break;
@@ -2396,16 +2421,19 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
- reg_base, dir, num);
+ reg_base, dir, num, pf_bmap);
if (err)
goto exit;
err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
- reg_base, dir_up, num);
+ reg_base, dir_up, num, pf_bmap);
if (err)
goto exit;
for (i = 0; i < num; i++) {
+ if (!test_bit(i, pf_bmap))
+ continue;
+
mwork = &mw->mbox_wrk[i];
mwork->rvu = rvu;
INIT_WORK(&mwork->work, mbox_handler);