RDMA/mlx5: Ensure created mkeys always have a populated rb_key
author    Jason Gunthorpe <jgg@nvidia.com>
          Tue, 28 May 2024 12:52:54 +0000 (15:52 +0300)
committer Jason Gunthorpe <jgg@nvidia.com>
          Fri, 21 Jun 2024 13:19:36 +0000 (10:19 -0300)
mmkey.cacheable and mmkey.rb_key are used together by mlx5_revoke_mr() to
put the MR/mkey back into the cache. In all cases they should be set
correctly.
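
For reference, the dereg path that consumes these two fields looks
roughly like this (a simplified sketch of mlx5_revoke_mr() in
drivers/infiniband/hw/mlx5/mr.c, abbreviated for illustration):

	/* On dereg, a cacheable mkey is revoked and parked in the cache
	 * bucket matching its rb_key instead of being destroyed. */
	static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
	{
		struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);

		if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) &&
		    !cache_ent_find_and_store(dev, mr))
			return 0;	/* mkey kept for reuse */
		/* ... otherwise the mkey is destroyed ... */
	}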

alloc_cacheable_mr() was setting cacheable but not filling rb_key,
resulting in cache_ent_find_and_store() bucketing all such MRs into a
zero-length entry.
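
The mis-bucketing happens because cache_ent_find_and_store() resolves
the bucket purely from mmkey.rb_key (sketched, abbreviated):

	/* Cache buckets live in an rb-tree keyed by mlx5r_cache_rb_key.
	 * With rb_key left zero-initialized (ndescs == 0, no access
	 * flags), every dereg'd MR matches the same zero-length bucket. */
	ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);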

implicit_get_child_mr()/mlx5_ib_alloc_implicit_mr() set neither cacheable
nor rb_key, so the cache was not working at all for implicit ODP.
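
Both broken paths allocate through the common helper
_mlx5_mr_cache_alloc(), which already has the cache entry (and hence its
rb_key) in hand; e.g. the implicit ODP children come straight from the
mkey cache (sketched, arguments abbreviated):

	/* odp.c: implicit child MRs are allocated from the mkey cache,
	 * ending up in _mlx5_mr_cache_alloc() */
	mr = mlx5_mr_cache_alloc(dev, ...);

Populating mmkey.rb_key and mmkey.cacheable in that helper therefore
covers both callers, and makes the per-caller assignment in
alloc_cacheable_mr() redundant (removed in the second hunk below).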

Cc: stable@vger.kernel.org
Fixes: 8c1185fef68c ("RDMA/mlx5: Change check for cacheable mkeys")
Fixes: dd1b913fb0d0 ("RDMA/mlx5: Cache all user cacheable mkeys on dereg MR flow")
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/7778c02dfa0999a30d6746c79a23dd7140a9c729.1716900410.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 35dcb9d9e12af2502421ccf3de664c49eddf82c6..d3c1f63791a2b6354ded7589733df36591fc69cb 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -718,6 +718,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
        }
        mr->mmkey.cache_ent = ent;
        mr->mmkey.type = MLX5_MKEY_MR;
+       mr->mmkey.rb_key = ent->rb_key;
+       mr->mmkey.cacheable = true;
        init_waitqueue_head(&mr->mmkey.wait);
        return mr;
 }
@@ -1168,7 +1170,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
        mr->ibmr.pd = pd;
        mr->umem = umem;
        mr->page_shift = order_base_2(page_size);
-       mr->mmkey.cacheable = true;
        set_mr_fields(dev, mr, umem->length, access_flags, iova);
 
        return mr;