author	Michael Guralnik <michaelgur@nvidia.com>	2024-09-09 13:05:04 +0300
committer	Leon Romanovsky <leon@kernel.org>	2024-09-13 08:27:30 +0300
commit	907936b6f4e630718cc31ddea79cc76a3e32080a (patch)
tree	ce10967c75e7c58703463c274954a36fc5c0073f /drivers/net
parent	6f2487bfafce5e6cd6f89e7238a82012f7b9f5ac (diff)
net/mlx5: Handle memory scheme ODP capabilities
When running over new FW that supports the new memory scheme ODP, set the cap in the FW to signal the FW that we are working in the new scheme.

In the memory scheme ODP, the per_transport_service capabilities are RO for the driver, so we skip setting them.

Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Link: https://patch.msgid.link/20240909100504.29797-9-michaelgur@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/main.c	22
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index cc2aa46cff04..4ec6507d094a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -454,8 +454,8 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
 
 static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
 {
+	bool do_set = false, mem_page_fault = false;
 	void *set_hca_cap;
-	bool do_set = false;
 	int err;
 
 	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
@@ -470,6 +470,17 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
 	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur,
 	       MLX5_ST_SZ_BYTES(odp_cap));
 
+	/* For best performance, enable memory scheme ODP only when
+	 * it has page prefetch enabled.
+	 */
+	if (MLX5_CAP_ODP_MAX(dev, mem_page_fault) &&
+	    MLX5_CAP_ODP_MAX(dev, memory_page_fault_scheme_cap.page_prefetch)) {
+		mem_page_fault = true;
+		do_set = true;
+		MLX5_SET(odp_cap, set_hca_cap, mem_page_fault, mem_page_fault);
+		goto set;
+	}
+
 #define ODP_CAP_SET_MAX(dev, field) \
 	do { \
 		u32 _res = MLX5_CAP_ODP_MAX(dev, field); \
@@ -494,10 +505,13 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
 	ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.read);
 	ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.atomic);
 
-	if (!do_set)
-		return 0;
+set:
+	if (do_set)
+		err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
 
-	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+	mlx5_core_dbg(dev, "Using ODP %s scheme\n",
+		      mem_page_fault ? "memory" : "transport");
+	return err;
 }
 
 static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
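
For readers skimming the patch, below is a minimal, self-contained C sketch of the scheme selection it introduces: prefer the memory scheme when the firmware's maximum ODP caps report both mem_page_fault and memory_page_fault_scheme_cap.page_prefetch, otherwise stay on the transport scheme, whose per_transport_service fields are negotiated field by field via ODP_CAP_SET_MAX (and are read-only under the new scheme). The struct fw_odp_caps and use_memory_scheme() names are illustrative stand-ins, not part of the mlx5 driver; the real code reads and writes these bits through the MLX5_CAP_ODP_MAX()/MLX5_SET() accessors shown in the diff above.

/* Simplified model of the decision made in handle_hca_cap_odp().
 * Hypothetical types; the driver itself works on the firmware's
 * odp_cap layout via MLX5_CAP_ODP_MAX()/MLX5_SET().
 */
#include <stdbool.h>
#include <stdio.h>

struct fw_odp_caps {		/* stand-in for the FW's maximum ODP caps */
	bool mem_page_fault;	/* FW supports memory scheme ODP */
	bool page_prefetch;	/* memory_page_fault_scheme_cap.page_prefetch */
};

/* True when the driver should enable memory scheme ODP, i.e. set
 * mem_page_fault=1 in the HCA cap it writes back to the FW.
 */
static bool use_memory_scheme(const struct fw_odp_caps *caps)
{
	/* Mirror the patch's comment: enable the memory scheme only
	 * when page prefetch is also supported, for best performance.
	 */
	return caps->mem_page_fault && caps->page_prefetch;
}

int main(void)
{
	struct fw_odp_caps new_fw = { .mem_page_fault = true,  .page_prefetch = true  };
	struct fw_odp_caps old_fw = { .mem_page_fault = false, .page_prefetch = false };

	printf("new FW: using ODP %s scheme\n",
	       use_memory_scheme(&new_fw) ? "memory" : "transport");
	printf("old FW: using ODP %s scheme\n",
	       use_memory_scheme(&old_fw) ? "memory" : "transport");
	return 0;
}

Either way, the reworked function always reports which scheme is in use (mlx5_core_dbg() in the real code) and only calls set_caps() when a capability actually needs to be written back, which is why the early "if (!do_set) return 0;" exit is replaced by the "set:" label and the trailing debug message.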