net/mlx5e: SHAMPO, Release in progress headers
author     Dragos Tatulea <dtatulea@nvidia.com>
           Thu, 15 Aug 2024 07:16:09 +0000 (10:16 +0300)
committer  Jakub Kicinski <kuba@kernel.org>
           Fri, 16 Aug 2024 23:15:57 +0000 (16:15 -0700)
The change in the Fixes tag cleaned up too much: it removed the part
that was releasing header pages which had been posted via UMR but not
yet acknowledged on the ICOSQ.

This patch corrects the omission by setting the bits between pi and ci
in the SHAMPO header bitmap when shutting down a queue with SHAMPO
enabled. To be consistent with the Striding RQ code, this is done in
mlx5e_free_rx_missing_descs().
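
Concretely, the range to mark is the circular distance between the
consumer index (ci) and the producer index (pi) of the header ring, and
that range may wrap past the end of the ring. The following is a
minimal, self-contained sketch of that idea only, not the driver code:
mark_in_progress, ring_size and the bool array standing in for the
kernel bitmap are illustrative names, and ring_size is assumed to be a
power of two (as the hd_per_wq - 1 masking in the patch implies for
hd_per_wq).

    #include <stdbool.h>

    /*
     * Mark the circular range [ci, pi) of a ring with ring_size slots
     * as in progress. A bool array stands in for the kernel bitmap; the
     * split mirrors the wrap-around handling in mlx5e_shampo_fill_umr().
     */
    void mark_in_progress(bool *busy, unsigned int ring_size,
                          unsigned int ci, unsigned int pi)
    {
            unsigned int len = (pi - ci) & (ring_size - 1);
            unsigned int from = ci;

            if (from + len > ring_size) {
                    /* The range wraps: fill the tail of the ring first. */
                    for (unsigned int i = from; i < ring_size; i++)
                            busy[i] = true;
                    len -= ring_size - from;
                    from = 0;
            }

            for (unsigned int i = 0; i < len; i++)
                    busy[from + i] = true;
    }

The actual helper, mlx5e_shampo_fill_umr() below, performs the same
split using bitmap_set() on shampo->bitmap.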

Fixes: e839ac9a89cb ("net/mlx5e: SHAMPO, Simplify header page release in teardown")
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20240815071611.2211873-3-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index bb5da42edc23a07fff50988c5e01e90bd51c30a3..d9e241423bc5673299d13175d27197cb1117700c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -998,6 +998,7 @@ void mlx5e_build_ptys2ethtool_map(void);
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
                                            enum mlx5e_mpwrq_umr_mode umr_mode);
 
+void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len);
 void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq);
 void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
 void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5df904639b0ce6295010c8c94bb82be6a31a339c..583fa24a7ae980aa69085bea1b83e30f95762d5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1236,6 +1236,14 @@ void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
        rq->mpwqe.actual_wq_head = wq->head;
        rq->mpwqe.umr_in_progress = 0;
        rq->mpwqe.umr_completed = 0;
+
+       if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
+               struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+               u16 len;
+
+               len = (shampo->pi - shampo->ci) & shampo->hd_per_wq;
+               mlx5e_shampo_fill_umr(rq, len);
+       }
 }
 
 void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 23aa555ca0ae8dfbe32d7901ef214b9128c2bf17..de9d01036c2807afea326a90955aa3f59d039cbb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -963,26 +963,31 @@ void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
        sq->cc = sqcc;
 }
 
-static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
-                                      struct mlx5e_icosq *sq)
+void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len)
 {
-       struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
-       struct mlx5e_shampo_hd *shampo;
-       /* assume 1:1 relationship between RQ and icosq */
-       struct mlx5e_rq *rq = &c->rq;
-       int end, from, len = umr.len;
+       struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+       int end, from, full_len = len;
 
-       shampo = rq->mpwqe.shampo;
        end = shampo->hd_per_wq;
        from = shampo->ci;
-       if (from + len > shampo->hd_per_wq) {
+       if (from + len > end) {
                len -= end - from;
                bitmap_set(shampo->bitmap, from, end - from);
                from = 0;
        }
 
        bitmap_set(shampo->bitmap, from, len);
-       shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1);
+       shampo->ci = (shampo->ci + full_len) & (shampo->hd_per_wq - 1);
+}
+
+static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
+                                      struct mlx5e_icosq *sq)
+{
+       struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
+       /* assume 1:1 relationship between RQ and icosq */
+       struct mlx5e_rq *rq = &c->rq;
+
+       mlx5e_shampo_fill_umr(rq, umr.len);
 }
 
 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)