]> git.dujemihanovic.xyz Git - linux.git/commitdiff
net/mlx5: Shift control IRQ to the last index
authorShay Drory <shayd@nvidia.com>
Thu, 19 Aug 2021 13:18:57 +0000 (16:18 +0300)
committerSaeed Mahameed <saeedm@nvidia.com>
Tue, 5 Oct 2021 01:10:57 +0000 (18:10 -0700)
Control IRQ is the first IRQ vector. This complicates the handling of
completion IRQs, as we need to offset them by one.
In the next patch, there are scenarios where completion and control EQs
will share the same IRQ — for example, functions with a single IRQ. To ease
such scenarios, shift the control IRQ to the end of the IRQ array.

Signed-off-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/infiniband/hw/mlx5/odp.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
include/linux/mlx5/driver.h

index d0d98e584ebcc312b6f890e5330f16ccb4d73e7f..81147d774dd2d9de21bc1813bb3ebc433cc7242a 100644 (file)
@@ -1559,6 +1559,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 
        eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
        param = (struct mlx5_eq_param) {
+               .irq_index = MLX5_IRQ_EQ_CTRL,
                .nent = MLX5_IB_NUM_PF_EQE,
        };
        param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
index 605c8ecc3610f71360e4f0354effb35eb1ad15c3..792e0d6aa86188bc845debf5f21ac4706da61ade 100644 (file)
@@ -632,6 +632,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
        mlx5_eq_notifier_register(dev, &table->cq_err_nb);
 
        param = (struct mlx5_eq_param) {
+               .irq_index = MLX5_IRQ_EQ_CTRL,
                .nent = MLX5_NUM_CMD_EQE,
                .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
        };
@@ -644,6 +645,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
        mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
 
        param = (struct mlx5_eq_param) {
+               .irq_index = MLX5_IRQ_EQ_CTRL,
                .nent = MLX5_NUM_ASYNC_EQE,
        };
 
@@ -653,6 +655,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
                goto err2;
 
        param = (struct mlx5_eq_param) {
+               .irq_index = MLX5_IRQ_EQ_CTRL,
                .nent = /* TODO: sriov max_vf + */ 1,
                .mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
        };
@@ -806,8 +809,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
        ncomp_eqs = table->num_comp_eqs;
        nent = MLX5_COMP_EQ_SIZE;
        for (i = 0; i < ncomp_eqs; i++) {
-               int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
                struct mlx5_eq_param param = {};
+               int vecidx = i;
 
                eq = kzalloc(sizeof(*eq), GFP_KERNEL);
                if (!eq) {
@@ -953,9 +956,7 @@ static int set_rmap(struct mlx5_core_dev *mdev)
                goto err_out;
        }
 
-       vecidx = MLX5_IRQ_VEC_COMP_BASE;
-       for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE;
-            vecidx++) {
+       for (vecidx = 0; vecidx < eq_table->num_comp_eqs; vecidx++) {
                err = irq_cpu_rmap_add(eq_table->rmap,
                                       pci_irq_vector(mdev->pdev, vecidx));
                if (err) {
index abd024173c42e3fae1429627d5682ab41f6b2312..8116815663a77b46c5e4fd9b4fa695e57db5f0b5 100644 (file)
@@ -8,8 +8,6 @@
 
 #define MLX5_COMP_EQS_PER_SF 8
 
-#define MLX5_IRQ_EQ_CTRL (0)
-
 struct mlx5_irq;
 
 int mlx5_irq_table_init(struct mlx5_core_dev *dev);
index 763c83a0238091ef55b7e13d78bc4dc41212be60..a66144b54fc8d684b8c8a33aca230b9e3d5f4126 100644 (file)
@@ -194,15 +194,14 @@ static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
        snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
 }
 
-static void irq_set_name(char *name, int vecidx)
+static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 {
-       if (vecidx == 0) {
+       if (vecidx == pool->xa_num_irqs.max) {
                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
                return;
        }
 
-       snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
-                vecidx - MLX5_IRQ_VEC_COMP_BASE);
+       snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
 }
 
 static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
@@ -217,7 +216,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
                return ERR_PTR(-ENOMEM);
        irq->irqn = pci_irq_vector(dev->pdev, i);
        if (!pool->name[0])
-               irq_set_name(name, i);
+               irq_set_name(pool, name, i);
        else
                irq_sf_set_name(pool, name, i);
        ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
@@ -440,6 +439,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
        }
 pf_irq:
        pool = irq_table->pf_pool;
+       vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx;
        irq = irq_pool_request_vector(pool, vecidx, affinity);
 out:
        if (IS_ERR(irq))
index e23417424373fc647d89e3df7d8ea2b51684ca43..0ca719c00824ae1c6765028cf2e38a67e7781b57 100644 (file)
@@ -59,6 +59,8 @@
 
 #define MLX5_ADEV_NAME "mlx5_core"
 
+#define MLX5_IRQ_EQ_CTRL (U8_MAX)
+
 enum {
        MLX5_BOARD_ID_LEN = 64,
 };