net: mana: Assign interrupts to CPUs based on NUMA nodes

Message ID 1667282761-11547-1-git-send-email-ssengar@linux.microsoft.com
State New
Series net: mana: Assign interrupts to CPUs based on NUMA nodes

Commit Message

Saurabh Singh Sengar Nov. 1, 2022, 6:06 a.m. UTC
  In large VMs with multiple NUMA nodes, network performance is usually
best if network interrupts are all assigned to the same virtual NUMA
node. This patch assigns online CPUs to IRQs according to a NUMA-aware
policy: local CPUs are chosen first, followed by non-local ones, and the
assignment wraps around once all online CPUs have been used.

Signed-off-by: Saurabh Sengar <ssengar@linux.microsoft.com>
---
 drivers/net/ethernet/microsoft/mana/gdma.h    |  1 +
 .../net/ethernet/microsoft/mana/gdma_main.c   | 30 +++++++++++++++++--
 2 files changed, 28 insertions(+), 3 deletions(-)
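
For context, the helper this patch builds on is cpumask_local_spread():
given an index i and a NUMA node, it returns the i-th online CPU,
enumerating the CPUs of that node before remote ones and wrapping around
past the number of online CPUs. A minimal illustrative sketch, where
example_report_spread and its pr_info output are ours rather than part
of the patch:

#include <linux/cpumask.h>
#include <linux/printk.h>

/*
 * Illustration only: report which CPU each of @nvec interrupt vectors
 * would get under the NUMA-aware policy. CPUs of @node come first,
 * then CPUs of other nodes, wrapping around once @i exceeds the
 * number of online CPUs.
 */
static void example_report_spread(int node, int nvec)
{
	int i;

	for (i = 0; i < nvec; i++)
		pr_info("vector %d -> CPU %u\n", i,
			cpumask_local_spread(i, node));
}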
  

Comments

Haiyang Zhang Nov. 2, 2022, 1:55 p.m. UTC | #1
> -----Original Message-----
> From: Saurabh Sengar <ssengar@linux.microsoft.com>
> Sent: Tuesday, November 1, 2022 2:06 AM
> To: Saurabh Singh Sengar <ssengar@microsoft.com>; KY Srinivasan
> <kys@microsoft.com>; Haiyang Zhang <haiyangz@microsoft.com>;
> wei.liu@kernel.org; Dexuan Cui <decui@microsoft.com>;
> davem@davemloft.net; edumazet@google.com; kuba@kernel.org;
> pabeni@redhat.com; ssengar@linux.microsoft.com;
> colin.i.king@googlemail.com; vkuznets@redhat.com; linux-
> hyperv@vger.kernel.org; netdev@vger.kernel.org; linux-
> kernel@vger.kernel.org; Michael Kelley (LINUX) <mikelley@microsoft.com>
> Subject: [PATCH] net: mana: Assign interrupts to CPUs based on NUMA nodes
> 
> In large VMs with multiple NUMA nodes, network performance is usually
> best if network interrupts are all assigned to the same virtual NUMA
> node. This patch assigns online CPUs to IRQs according to a NUMA-aware
> policy: local CPUs are chosen first, followed by non-local ones, and the
> assignment wraps around once all online CPUs have been used.
> 
> Signed-off-by: Saurabh Sengar <ssengar@linux.microsoft.com>

Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>

Thank you.
  
patchwork-bot+netdevbpf@kernel.org Nov. 3, 2022, 11:30 a.m. UTC | #2
Hello:

This patch was applied to netdev/net-next.git (master)
by Paolo Abeni <pabeni@redhat.com>:

On Mon, 31 Oct 2022 23:06:01 -0700 you wrote:
> In large VMs with multiple NUMA nodes, network performance is usually
> best if network interrupts are all assigned to the same virtual NUMA
> node. This patch assigns online CPUs to IRQs according to a NUMA-aware
> policy: local CPUs are chosen first, followed by non-local ones, and the
> assignment wraps around once all online CPUs have been used.
> 
> Signed-off-by: Saurabh Sengar <ssengar@linux.microsoft.com>
> 
> [...]

Here is the summary with links:
  - net: mana: Assign interrupts to CPUs based on NUMA nodes
    https://git.kernel.org/netdev/net-next/c/71fa6887eeca

You are awesome, thank you!
  

Patch

diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
index 4a6efe6ada08..db340f36ef29 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma.h
+++ b/drivers/net/ethernet/microsoft/mana/gdma.h
@@ -353,6 +353,7 @@ struct gdma_context {
 	void __iomem		*shm_base;
 	void __iomem		*db_page_base;
 	u32 db_page_size;
+	int                     numa_node;
 
 	/* Shared memory chanenl (used to bootstrap HWC) */
 	struct shm_channel	shm_channel;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index a6f99b4344d9..726ac94d96ae 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1208,8 +1208,10 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 	struct gdma_context *gc = pci_get_drvdata(pdev);
 	struct gdma_irq_context *gic;
 	unsigned int max_irqs;
+	u16 *cpus;
+	cpumask_var_t req_mask;
 	int nvec, irq;
-	int err, i, j;
+	int err, i = 0, j;
 
 	if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
 		max_queues_per_port = MANA_MAX_NUM_QUEUES;
@@ -1228,7 +1230,21 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 		goto free_irq_vector;
 	}
 
+	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) {
+		err = -ENOMEM;
+		goto free_irq;
+	}
+
+	cpus = kcalloc(nvec, sizeof(*cpus), GFP_KERNEL);
+	if (!cpus) {
+		err = -ENOMEM;
+		goto free_mask;
+	}
+	for (i = 0; i < nvec; i++)
+		cpus[i] = cpumask_local_spread(i, gc->numa_node);
+
 	for (i = 0; i < nvec; i++) {
+		cpumask_set_cpu(cpus[i], req_mask);
 		gic = &gc->irq_contexts[i];
 		gic->handler = NULL;
 		gic->arg = NULL;
@@ -1236,13 +1252,17 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 		irq = pci_irq_vector(pdev, i);
 		if (irq < 0) {
 			err = irq;
-			goto free_irq;
+			goto free_mask;
 		}
 
 		err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
 		if (err)
-			goto free_irq;
+			goto free_mask;
+		irq_set_affinity_and_hint(irq, req_mask);
+		cpumask_clear(req_mask);
 	}
+	free_cpumask_var(req_mask);
+	kfree(cpus);
 
 	err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
 	if (err)
@@ -1253,6 +1273,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 
 	return 0;
 
+free_mask:
+	free_cpumask_var(req_mask);
+	kfree(cpus);
 free_irq:
 	for (j = i - 1; j >= 0; j--) {
 		irq = pci_irq_vector(pdev, j);
@@ -1382,6 +1405,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!bar0_va)
 		goto free_gc;
 
+	gc->numa_node = dev_to_node(&pdev->dev);
 	gc->is_pf = mana_is_pf(pdev->device);
 	gc->bar0_va = bar0_va;
 	gc->dev = &pdev->dev;
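
Taken together, the probe path records the device's home NUMA node via
dev_to_node() and the IRQ setup path pins each MSI-X vector to one
NUMA-spread CPU. Below is a condensed sketch of that per-vector pattern,
not the driver's exact code: error unwinding is elided, and
example_setup_affinity, example_intr and ctx are placeholder names.

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

static int example_setup_affinity(struct pci_dev *pdev, int nvec,
				  const u16 *cpus, struct cpumask *req_mask,
				  irq_handler_t example_intr, void *ctx)
{
	int irq, err, i;

	for (i = 0; i < nvec; i++) {
		irq = pci_irq_vector(pdev, i);
		if (irq < 0)
			return irq;

		err = request_irq(irq, example_intr, 0, "example_intr", ctx);
		if (err)
			return err;

		/* One CPU per vector, reusing a single scratch mask:
		 * set the chosen CPU, apply it as affinity and hint,
		 * then clear the mask for the next vector.
		 */
		cpumask_set_cpu(cpus[i], req_mask);
		irq_set_affinity_and_hint(irq, req_mask);
		cpumask_clear(req_mask);
	}
	return 0;
}

Reusing one zeroed scratch mask avoids allocating a cpumask per vector,
and irq_set_affinity_and_hint() both applies the affinity and publishes
it in /proc/irq/<n>/affinity_hint for user-space tools such as irqbalance.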