author	Michael Guralnik <michaelgur@nvidia.com>	2023-01-26 00:28:05 +0200
committer	Jason Gunthorpe <jgg@nvidia.com>	2023-01-27 13:04:09 -0400
commit	73d09b2fe8336f5f37935e46418666ddbcd3c343 (patch)
tree	cc2ef0f1196c2f25fcd4edea7e42b247beb3e8bc /drivers/infiniband/hw/mlx5/mlx5_ib.h
parent	b9584517832858a0f78d6851d09b697a829514cd (diff)
RDMA/mlx5: Introduce mlx5r_cache_rb_key
Switch from using the mkey order to using the new struct as the key to the RB tree of cache entries.

The key is all the mkey properties that UMR operations can't modify; it is used to define the cache entries and to search for and create cache mkeys.

Link: https://lore.kernel.org/r/20230125222807.6921-5-michaelgur@nvidia.com
Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
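For illustration, below is a minimal userspace sketch of how a composite key such as mlx5r_cache_rb_key can impose a total order for RB-tree search and insertion. The field-by-field comparison order and the function name cache_rb_key_cmp are assumptions made for this example; the real comparison lives in the mkey cache implementation, not in this header.

/*
 * Hypothetical sketch, not the in-tree implementation: ordering a
 * composite cache key field by field, the way an RB-tree comparator
 * would. Field names mirror the struct added by this patch; u8 ats:1
 * from the kernel header is modeled as unsigned char here.
 */
#include <stdio.h>

struct mlx5r_cache_rb_key {
	unsigned char ats;
	unsigned int access_mode;
	unsigned int access_flags;
	unsigned int ndescs;
};

/* Return <0, 0 or >0, memcmp()-style, comparing field by field. */
static int cache_rb_key_cmp(struct mlx5r_cache_rb_key a,
			    struct mlx5r_cache_rb_key b)
{
	if (a.ats != b.ats)
		return a.ats < b.ats ? -1 : 1;
	if (a.access_mode != b.access_mode)
		return a.access_mode < b.access_mode ? -1 : 1;
	if (a.access_flags != b.access_flags)
		return a.access_flags < b.access_flags ? -1 : 1;
	if (a.ndescs != b.ndescs)
		return a.ndescs < b.ndescs ? -1 : 1;
	return 0;
}

int main(void)
{
	struct mlx5r_cache_rb_key k1 = { .access_mode = 1, .ndescs = 8 };
	struct mlx5r_cache_rb_key k2 = { .access_mode = 1, .ndescs = 16 };

	/* Negative result: k1 sorts before k2 (smaller ndescs). */
	printf("cmp = %d\n", cache_rb_key_cmp(k1, k2));
	return 0;
}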
Diffstat (limited to 'drivers/infiniband/hw/mlx5/mlx5_ib.h')
-rw-r--r--	drivers/infiniband/hw/mlx5/mlx5_ib.h	27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index eec16db2d536..a9ef9d751c5e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -617,6 +617,13 @@ enum mlx5_mkey_type {
 	MLX5_MKEY_INDIRECT_DEVX,
 };
 
+struct mlx5r_cache_rb_key {
+	u8 ats:1;
+	unsigned int access_mode;
+	unsigned int access_flags;
+	unsigned int ndescs;
+};
+
 struct mlx5_ib_mkey {
 	u32 key;
 	enum mlx5_mkey_type type;
@@ -737,11 +744,9 @@ struct mlx5_cache_ent {
 	unsigned long		reserved;
 
 	char			name[4];
-	u32			order;
-	u32			access_mode;
-	unsigned int		ndescs;
 
 	struct rb_node		node;
+	struct mlx5r_cache_rb_key rb_key;
 
 	u8 disabled:1;
 	u8 fill_to_high_water:1;
@@ -1320,14 +1325,13 @@ int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
 int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
 int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
 struct mlx5_cache_ent *mlx5r_cache_create_ent(struct mlx5_ib_dev *dev,
-					      int order);
+					      struct mlx5r_cache_rb_key rb_key,
+					      bool persistent_entry);
 
 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
-				       struct mlx5_cache_ent *ent,
-				       int access_flags);
+				       int access_flags, int access_mode,
+				       int ndescs);
 
-struct mlx5_ib_mr *mlx5_mr_cache_alloc_order(struct mlx5_ib_dev *dev, u32 order,
-					     int access_flags);
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 			    struct ib_mr_status *mr_status);
 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
@@ -1350,7 +1354,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
 int __init mlx5_ib_odp_init(void);
 void mlx5_ib_odp_cleanup(void);
-void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent);
+int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev);
 void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
 			   struct mlx5_ib_mr *mr, int flags);
@@ -1369,7 +1373,10 @@ static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
 static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
 static inline int mlx5_ib_odp_init(void) { return 0; }
 static inline void mlx5_ib_odp_cleanup(void) {}
-static inline void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) {}
+static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
+{
+	return 0;
+}
 static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
 					 struct mlx5_ib_mr *mr, int flags) {}
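For context, a hedged sketch of a caller after this change: mlx5_mr_cache_alloc() now takes the mkey properties directly instead of a pre-resolved cache entry. The wrapper function and its surrounding context are illustrative assumptions about a typical MR-creation path, not code from this patch.

/*
 * Illustrative only: passing the mkey properties straight to
 * mlx5_mr_cache_alloc(); the cache looks up (or creates) the matching
 * RB-tree entry internally. MLX5_MKC_ACCESS_MODE_MTT is the usual
 * access mode for MTT-based MRs; the wrapper name is hypothetical.
 */
static struct mlx5_ib_mr *alloc_cached_mr_example(struct mlx5_ib_dev *dev,
						  int access_flags,
						  int ndescs)
{
	return mlx5_mr_cache_alloc(dev, access_flags,
				   MLX5_MKC_ACCESS_MODE_MTT, ndescs);
}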