
net/mlx5e: Enable striding RQ for Connect-X IPsec capable devices

This limitation was inherited from the previous Innova (FPGA) IPsec
implementation, which uses its own private set of RQ handlers that do
not support striding RQ; for Connect-X this is no longer true.

Fix by keeping this limitation only for Innova IPsec capable devices;
otherwise it effectively, and wrongly, blocks striding RQs for all
future Connect-X devices on all flows, even when IPsec offload is not
in use.

Fixes: 2d64663cd5 ("net/mlx5: IPsec: Add HW crypto offload support")
Signed-off-by: Raed Salem <raeds@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Author: Raed Salem, 2021-01-24 22:40:23 +02:00
Committed by: Saeed Mahameed
parent 0e22bfb7c0
commit e4484d9df5
4 changed files with 8 additions and 5 deletions
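
For orientation, below is a small, self-contained C sketch of the decision this patch changes; it is not the driver code, and the struct fields and helper bodies are stand-ins. After the patch, striding RQ is ruled out only when the device is an Innova (FPGA) IPsec device, rather than for every IPsec-capable device.

/*
 * Illustrative sketch only: simplified stand-ins for the mlx5 driver state
 * and helpers, showing the check that moves from MLX5_IPSEC_DEV() to
 * mlx5_fpga_is_ipsec_device().
 */
#include <stdbool.h>
#include <stdio.h>

struct mlx5_core_dev {
	bool striding_rq_cap;	/* stand-in for the striding RQ capability bit */
	bool fpga_ipsec;	/* stand-in for "Innova (FPGA) IPsec device" */
	bool crypto_ipsec_cap;	/* stand-in for Connect-X IPsec crypto offload */
};

static bool mlx5e_check_fragmented_striding_rq_cap(const struct mlx5_core_dev *mdev)
{
	return mdev->striding_rq_cap;
}

static bool mlx5_fpga_is_ipsec_device(const struct mlx5_core_dev *mdev)
{
	return mdev->fpga_ipsec;
}

static bool striding_rq_possible(const struct mlx5_core_dev *mdev)
{
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return false;

	/* Only Innova keeps its private, non-striding RQ handlers. */
	return !mlx5_fpga_is_ipsec_device(mdev);
}

int main(void)
{
	struct mlx5_core_dev connectx = { .striding_rq_cap = true, .crypto_ipsec_cap = true };
	struct mlx5_core_dev innova = { .striding_rq_cap = true, .fpga_ipsec = true };

	/* Connect-X with IPsec crypto offload now gets striding RQ; Innova still does not. */
	printf("Connect-X: %d, Innova: %d\n",
	       striding_rq_possible(&connectx), striding_rq_possible(&innova));
	return 0;
}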

View file

@@ -65,6 +65,7 @@
 #include "en/devlink.h"
 #include "lib/mlx5.h"
 #include "en/ptp.h"
+#include "fpga/ipsec.h"
 
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
@@ -106,7 +107,7 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
 		return false;
 
-	if (MLX5_IPSEC_DEV(mdev))
+	if (mlx5_fpga_is_ipsec_device(mdev))
 		return false;
 
 	if (params->xdp_prog) {
@@ -2069,7 +2070,7 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
 	int i;
 
 #ifdef CONFIG_MLX5_EN_IPSEC
-	if (MLX5_IPSEC_DEV(mdev))
+	if (mlx5_fpga_is_ipsec_device(mdev))
 		byte_count += MLX5E_METADATA_ETHER_LEN;
 #endif

View file

@@ -1795,8 +1795,8 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
 		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
 
 #ifdef CONFIG_MLX5_EN_IPSEC
-		if (MLX5_IPSEC_DEV(mdev)) {
-			netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
+		if (mlx5_fpga_is_ipsec_device(mdev)) {
+			netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
 			return -EINVAL;
 		}
 #endif

View file

@@ -124,7 +124,7 @@ struct mlx5_fpga_ipsec {
 	struct ida halloc;
 };
 
-static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
+bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
 {
 	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
 		return false;

View file

@@ -43,6 +43,7 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
 const struct mlx5_flow_cmds *
 mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
 void mlx5_fpga_ipsec_build_fs_cmds(void);
+bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev);
 #else
 static inline
 const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
@@ -55,6 +56,7 @@ mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
 }
 
 static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
+static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; }
 
 #endif /* CONFIG_MLX5_FPGA_IPSEC */
 
 #endif /* __MLX5_FPGA_IPSEC_H__ */
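
The header change follows the usual kernel declare-or-stub pattern: a real declaration when CONFIG_MLX5_FPGA_IPSEC is enabled, and a static inline stub returning false otherwise, so callers such as mlx5e_striding_rq_possible() compile unchanged either way. A minimal standalone sketch of that pattern, reusing the names from the patch (the struct body and main() harness are stand-ins, not kernel code):

/* Sketch of the declare-or-stub pattern from fpga/ipsec.h; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct mlx5_core_dev { int placeholder; };	/* stand-in for the real struct */

#ifdef CONFIG_MLX5_FPGA_IPSEC
/* The real implementation lives in fpga/ipsec.c when FPGA IPsec is built in. */
bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev);
#else
/* Stub: without FPGA IPsec support, no device can be an Innova IPsec device. */
static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; }
#endif

int main(void)
{
	struct mlx5_core_dev dev = { 0 };

	/* Built without CONFIG_MLX5_FPGA_IPSEC, this caller simply takes the false path. */
	printf("fpga ipsec device: %d\n", mlx5_fpga_is_ipsec_device(&dev));
	return 0;
}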