net/mlx5e: Convert mlx5e_flow_steering member of mlx5e_priv to pointer
Make the mlx5e_flow_steering member of mlx5e_priv a pointer and allocate it dynamically. Allocate fs for all profiles when initializing the profile, and symmetrically deallocate it at profile cleanup.

Signed-off-by: Lama Kayal <lkayal@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
This commit is contained in:
parent 454533aa87
commit af8bbf7300

14 changed files with 298 additions and 264 deletions
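Note: the reworked API (see the en/fs.h hunk below) hands ownership of the flow-steering context to the profile: mlx5e_fs_init() now takes the profile and returns an allocated struct mlx5e_flow_steering, and mlx5e_fs_cleanup() takes that pointer. The following is only a minimal sketch of how a profile init/cleanup path is expected to drive it; the allocation strategy, the example_* helper names and the error handling are illustrative assumptions, not the literal contents of this patch.

/* Hedged sketch, not the patch itself: a profile allocates fs at init
 * and frees it symmetrically at cleanup, as described in the commit
 * message above. kvzalloc/kvfree are assumed here for illustration.
 */
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile)
{
	struct mlx5e_flow_steering *fs;

	fs = kvzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return NULL;
	/* per-profile table setup (vlan, l2, ...) would follow here */
	return fs;
}

void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
	kvfree(fs);
}

/* In a profile's init path (hypothetical helper): */
static int example_profile_init(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->fs = mlx5e_fs_init(priv->profile);
	if (!priv->fs)
		return -ENOMEM;
	return 0;
}

/* ...and symmetrically in its cleanup path: */
static void example_profile_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_fs_cleanup(priv->fs);
	priv->fs = NULL;
}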
@@ -921,7 +921,7 @@ struct mlx5e_priv {
 	struct mlx5e_rx_res *rx_res;
 	u32 *tx_rates;
 
-	struct mlx5e_flow_steering fs;
+	struct mlx5e_flow_steering *fs;
 
 	struct workqueue_struct *wq;
 	struct work_struct update_carrier_work;
@@ -137,6 +137,7 @@ static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSU
 struct mlx5e_accel_fs_tcp;
 #endif
 
+struct mlx5e_profile;
 struct mlx5e_fs_udp;
 struct mlx5e_fs_any;
 struct mlx5e_ptp_fs;
@@ -177,8 +178,8 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
 int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
 
-int mlx5e_fs_init(struct mlx5e_priv *priv);
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile);
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
 
 int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
 void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
@@ -94,7 +94,7 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
 	if (!spec)
 		return ERR_PTR(-ENOMEM);
 
-	fs_udp = priv->fs.udp;
+	fs_udp = priv->fs->udp;
 	ft = fs_udp->tables[type].t;
 
 	fs_udp_set_dport_flow(spec, type, d_port);
@@ -121,10 +121,10 @@ static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type typ
 	struct mlx5e_fs_udp *fs_udp;
 	int err;
 
-	fs_udp = priv->fs.udp;
+	fs_udp = priv->fs->udp;
 	fs_udp_t = &fs_udp->tables[type];
 
-	dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_udp2tt(type));
+	dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_udp2tt(type));
 	rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
@@ -208,7 +208,7 @@ out:
 
 static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
 {
-	struct mlx5e_flow_table *ft = &priv->fs.udp->tables[type];
+	struct mlx5e_flow_table *ft = &priv->fs->udp->tables[type];
 	struct mlx5_flow_table_attr ft_attr = {};
 	int err;
 
@@ -218,7 +218,7 @@ static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
 	ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
 	ft_attr.prio = MLX5E_NIC_PRIO;
 
-	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+	ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
 		ft->t = NULL;
@@ -259,7 +259,7 @@ static int fs_udp_disable(struct mlx5e_priv *priv)
 
 	for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
 		/* Modify ttc rules destination to point back to the indir TIRs */
-		err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_udp2tt(i));
+		err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_udp2tt(i));
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -278,10 +278,10 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
-		dest.ft = priv->fs.udp->tables[i].t;
+		dest.ft = priv->fs->udp->tables[i].t;
 
 		/* Modify ttc rules destination to point on the accel_fs FTs */
-		err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_udp2tt(i), &dest);
+		err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_udp2tt(i), &dest);
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -294,7 +294,7 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
 
 void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
 {
-	struct mlx5e_fs_udp *fs_udp = priv->fs.udp;
+	struct mlx5e_fs_udp *fs_udp = priv->fs->udp;
 	int i;
 
 	if (!fs_udp)
@@ -309,20 +309,20 @@ void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
 		fs_udp_destroy_table(fs_udp, i);
 
 	kfree(fs_udp);
-	priv->fs.udp = NULL;
+	priv->fs->udp = NULL;
 }
 
 int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
 {
 	int i, err;
 
-	if (priv->fs.udp) {
-		priv->fs.udp->ref_cnt++;
+	if (priv->fs->udp) {
+		priv->fs->udp->ref_cnt++;
 		return 0;
 	}
 
-	priv->fs.udp = kzalloc(sizeof(*priv->fs.udp), GFP_KERNEL);
-	if (!priv->fs.udp)
+	priv->fs->udp = kzalloc(sizeof(*priv->fs->udp), GFP_KERNEL);
+	if (!priv->fs->udp)
 		return -ENOMEM;
 
 	for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
@@ -335,16 +335,16 @@ int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
 		if (err)
 			goto err_destroy_tables;
 
-	priv->fs.udp->ref_cnt = 1;
+	priv->fs->udp->ref_cnt = 1;
 
 	return 0;
 
 err_destroy_tables:
 	while (--i >= 0)
-		fs_udp_destroy_table(priv->fs.udp, i);
+		fs_udp_destroy_table(priv->fs->udp, i);
 
-	kfree(priv->fs.udp);
-	priv->fs.udp = NULL;
+	kfree(priv->fs->udp);
+	priv->fs->udp = NULL;
 	return err;
 }
 
@@ -371,7 +371,7 @@ mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
 	if (!spec)
 		return ERR_PTR(-ENOMEM);
 
-	fs_any = priv->fs.any;
+	fs_any = priv->fs->any;
 	ft = fs_any->table.t;
 
 	fs_any_set_ethertype_flow(spec, ether_type);
@@ -398,10 +398,10 @@ static int fs_any_add_default_rule(struct mlx5e_priv *priv)
 	struct mlx5e_fs_any *fs_any;
 	int err;
 
-	fs_any = priv->fs.any;
+	fs_any = priv->fs->any;
 	fs_any_t = &fs_any->table;
 
-	dest = mlx5_ttc_get_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+	dest = mlx5_ttc_get_default_dest(priv->fs->ttc, MLX5_TT_ANY);
 	rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
@@ -474,7 +474,7 @@ err:
 
 static int fs_any_create_table(struct mlx5e_priv *priv)
 {
-	struct mlx5e_flow_table *ft = &priv->fs.any->table;
+	struct mlx5e_flow_table *ft = &priv->fs->any->table;
 	struct mlx5_flow_table_attr ft_attr = {};
 	int err;
 
@@ -484,7 +484,7 @@ static int fs_any_create_table(struct mlx5e_priv *priv)
 	ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
 	ft_attr.prio = MLX5E_NIC_PRIO;
 
-	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+	ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
 		ft->t = NULL;
@@ -514,7 +514,7 @@ static int fs_any_disable(struct mlx5e_priv *priv)
 	int err;
 
 	/* Modify ttc rules destination to point back to the indir TIRs */
-	err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+	err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, MLX5_TT_ANY);
 	if (err) {
 		netdev_err(priv->netdev,
 			   "%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -530,10 +530,10 @@ static int fs_any_enable(struct mlx5e_priv *priv)
 	int err;
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = priv->fs.any->table.t;
+	dest.ft = priv->fs->any->table.t;
 
 	/* Modify ttc rules destination to point on the accel_fs FTs */
-	err = mlx5_ttc_fwd_dest(priv->fs.ttc, MLX5_TT_ANY, &dest);
+	err = mlx5_ttc_fwd_dest(priv->fs->ttc, MLX5_TT_ANY, &dest);
 	if (err) {
 		netdev_err(priv->netdev,
 			   "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -555,7 +555,7 @@ static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
 
 void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
 {
-	struct mlx5e_fs_any *fs_any = priv->fs.any;
+	struct mlx5e_fs_any *fs_any = priv->fs->any;
 
 	if (!fs_any)
 		return;
@@ -568,20 +568,20 @@ void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
 	fs_any_destroy_table(fs_any);
 
 	kfree(fs_any);
-	priv->fs.any = NULL;
+	priv->fs->any = NULL;
 }
 
 int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
 {
 	int err;
 
-	if (priv->fs.any) {
-		priv->fs.any->ref_cnt++;
+	if (priv->fs->any) {
+		priv->fs->any->ref_cnt++;
 		return 0;
 	}
 
-	priv->fs.any = kzalloc(sizeof(*priv->fs.any), GFP_KERNEL);
-	if (!priv->fs.any)
+	priv->fs->any = kzalloc(sizeof(*priv->fs->any), GFP_KERNEL);
+	if (!priv->fs->any)
 		return -ENOMEM;
 
 	err = fs_any_create_table(priv);
@@ -592,14 +592,14 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
 	if (err)
 		goto err_destroy_table;
 
-	priv->fs.any->ref_cnt = 1;
+	priv->fs->any->ref_cnt = 1;
 
 	return 0;
 
 err_destroy_table:
-	fs_any_destroy_table(priv->fs.any);
+	fs_any_destroy_table(priv->fs->any);
 
-	kfree(priv->fs.any);
-	priv->fs.any = NULL;
+	kfree(priv->fs->any);
+	priv->fs->any = NULL;
 	return err;
 }
@@ -624,7 +624,7 @@ static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
 
 static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
 {
-	struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+	struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
 
 	if (!ptp_fs->valid)
 		return;
@@ -641,7 +641,7 @@ static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
 static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
 {
 	u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
-	struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+	struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
 	struct mlx5_flow_handle *rule;
 	int err;
 
@@ -808,13 +808,13 @@ int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
 	if (!ptp_fs)
 		return -ENOMEM;
 
-	priv->fs.ptp_fs = ptp_fs;
+	priv->fs->ptp_fs = ptp_fs;
 	return 0;
 }
 
 void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
 {
-	struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+	struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
 
 	if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
 		return;
@@ -21,7 +21,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
 	u32 max_chain;
 
 	esw = priv->mdev->priv.eswitch;
-	chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv->fs.tc);
+	chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv->fs->tc);
 	max_chain = mlx5_chains_get_chain_range(chains);
 	reformat_and_fwd = is_esw ?
 		MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
@@ -86,7 +86,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
 	if (!spec)
 		return ERR_PTR(-ENOMEM);
 
-	fs_tcp = priv->fs.accel_tcp;
+	fs_tcp = priv->fs->accel_tcp;
 
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 
@@ -158,10 +158,10 @@ static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
 	struct mlx5_flow_handle *rule;
 	int err = 0;
 
-	fs_tcp = priv->fs.accel_tcp;
+	fs_tcp = priv->fs->accel_tcp;
 	accel_fs_t = &fs_tcp->tables[type];
 
-	dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_accel2tt(type));
+	dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_accel2tt(type));
 	rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
@@ -267,7 +267,7 @@ out:
 
 static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
 {
-	struct mlx5e_flow_table *ft = &priv->fs.accel_tcp->tables[type];
+	struct mlx5e_flow_table *ft = &priv->fs->accel_tcp->tables[type];
 	struct mlx5_flow_table_attr ft_attr = {};
 	int err;
 
@@ -277,7 +277,7 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
 	ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
 	ft_attr.prio = MLX5E_NIC_PRIO;
 
-	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+	ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
 		ft->t = NULL;
@@ -307,7 +307,7 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
 
 	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
 		/* Modify ttc rules destination to point back to the indir TIRs */
-		err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_accel2tt(i));
+		err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_accel2tt(i));
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -326,10 +326,10 @@ static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
-		dest.ft = priv->fs.accel_tcp->tables[i].t;
+		dest.ft = priv->fs->accel_tcp->tables[i].t;
 
 		/* Modify ttc rules destination to point on the accel_fs FTs */
-		err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_accel2tt(i), &dest);
+		err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_accel2tt(i), &dest);
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -344,7 +344,7 @@ static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
 {
 	struct mlx5e_accel_fs_tcp *fs_tcp;
 
-	fs_tcp = priv->fs.accel_tcp;
+	fs_tcp = priv->fs->accel_tcp;
 	if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
 		return;
 
@@ -357,7 +357,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
 {
 	int i;
 
-	if (!priv->fs.accel_tcp)
+	if (!priv->fs->accel_tcp)
 		return;
 
 	accel_fs_tcp_disable(priv);
@@ -365,8 +365,8 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
 	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
 		accel_fs_tcp_destroy_table(priv, i);
 
-	kfree(priv->fs.accel_tcp);
-	priv->fs.accel_tcp = NULL;
+	kfree(priv->fs->accel_tcp);
+	priv->fs->accel_tcp = NULL;
 }
 
 int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
|
@ -376,8 +376,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
|
||||||
if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
|
if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
priv->fs.accel_tcp = kzalloc(sizeof(*priv->fs.accel_tcp), GFP_KERNEL);
|
priv->fs->accel_tcp = kzalloc(sizeof(*priv->fs->accel_tcp), GFP_KERNEL);
|
||||||
if (!priv->fs.accel_tcp)
|
if (!priv->fs->accel_tcp)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
|
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
|
||||||
|
@@ -396,7 +396,7 @@ err_destroy_tables:
 	while (--i >= 0)
 		accel_fs_tcp_destroy_table(priv, i);
 
-	kfree(priv->fs.accel_tcp);
-	priv->fs.accel_tcp = NULL;
+	kfree(priv->fs->accel_tcp);
+	priv->fs->accel_tcp = NULL;
 	return err;
 }
@@ -184,13 +184,13 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
 	fs_prot = &accel_esp->fs_prot[type];
 
 	fs_prot->default_dest =
-		mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type));
+		mlx5_ttc_get_default_dest(priv->fs->ttc, fs_esp2tt(type));
 
 	ft_attr.max_fte = 1;
 	ft_attr.autogroup.max_num_groups = 1;
 	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
 	ft_attr.prio = MLX5E_NIC_PRIO;
-	ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+	ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft))
 		return PTR_ERR(ft);
 
@@ -205,7 +205,7 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
 	ft_attr.prio = MLX5E_NIC_PRIO;
 	ft_attr.autogroup.num_reserved_entries = 1;
 	ft_attr.autogroup.max_num_groups = 1;
-	ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+	ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_fs_ft;
@@ -249,7 +249,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
 	/* connect */
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	dest.ft = fs_prot->ft;
-	mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest);
+	mlx5_ttc_fwd_dest(priv->fs->ttc, fs_esp2tt(type), &dest);
 
 skip:
 	fs_prot->refcnt++;
@@ -271,7 +271,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
 		goto out;
 
 	/* disconnect */
-	mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_esp2tt(type));
+	mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_esp2tt(type));
 
 	/* remove FT */
 	rx_destroy(priv, type);
@@ -120,7 +120,7 @@ static int arfs_disable(struct mlx5e_priv *priv)
 
 	for (i = 0; i < ARFS_NUM_TYPES; i++) {
 		/* Modify ttc rules destination back to their default */
-		err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, arfs_get_tt(i));
+		err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, arfs_get_tt(i));
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -147,9 +147,9 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	for (i = 0; i < ARFS_NUM_TYPES; i++) {
-		dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
+		dest.ft = priv->fs->arfs->arfs_tables[i].ft.t;
 		/* Modify ttc rules destination to point on the aRFS FTs */
-		err = mlx5_ttc_fwd_dest(priv->fs.ttc, arfs_get_tt(i), &dest);
+		err = mlx5_ttc_fwd_dest(priv->fs->ttc, arfs_get_tt(i), &dest);
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
@@ -172,10 +172,10 @@ static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
 	int i;
 
 	arfs_del_rules(priv);
-	destroy_workqueue(priv->fs.arfs->wq);
+	destroy_workqueue(priv->fs->arfs->wq);
 	for (i = 0; i < ARFS_NUM_TYPES; i++) {
-		if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t))
-			arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]);
+		if (!IS_ERR_OR_NULL(priv->fs->arfs->arfs_tables[i].ft.t))
+			arfs_destroy_table(&priv->fs->arfs->arfs_tables[i]);
 	}
 }
 
@@ -185,13 +185,13 @@ void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
 		return;
 
 	_mlx5e_cleanup_tables(priv);
-	kvfree(priv->fs.arfs);
+	kvfree(priv->fs->arfs);
 }
 
 static int arfs_add_default_rule(struct mlx5e_priv *priv,
 				 enum arfs_type type)
 {
-	struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
+	struct arfs_table *arfs_t = &priv->fs->arfs->arfs_tables[type];
 	struct mlx5_flow_destination dest = {};
 	MLX5_DECLARE_FLOW_ACT(flow_act);
 	enum mlx5_traffic_types tt;
@@ -321,7 +321,7 @@ out:
 static int arfs_create_table(struct mlx5e_priv *priv,
 			     enum arfs_type type)
 {
-	struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+	struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
 	struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
 	struct mlx5_flow_table_attr ft_attr = {};
 	int err;
@@ -332,7 +332,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
 	ft_attr.level = MLX5E_ARFS_FT_LEVEL;
 	ft_attr.prio = MLX5E_NIC_PRIO;
 
-	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+	ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
 		ft->t = NULL;
@@ -361,14 +361,14 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
 	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
 		return 0;
 
-	priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL);
-	if (!priv->fs.arfs)
+	priv->fs->arfs = kvzalloc(sizeof(*priv->fs->arfs), GFP_KERNEL);
+	if (!priv->fs->arfs)
 		return -ENOMEM;
 
-	spin_lock_init(&priv->fs.arfs->arfs_lock);
-	INIT_LIST_HEAD(&priv->fs.arfs->rules);
-	priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
-	if (!priv->fs.arfs->wq)
+	spin_lock_init(&priv->fs->arfs->arfs_lock);
+	INIT_LIST_HEAD(&priv->fs->arfs->rules);
+	priv->fs->arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
+	if (!priv->fs->arfs->wq)
 		goto err;
 
 	for (i = 0; i < ARFS_NUM_TYPES; i++) {
@@ -381,7 +381,7 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
 err_des:
 	_mlx5e_cleanup_tables(priv);
 err:
-	kvfree(priv->fs.arfs);
+	kvfree(priv->fs->arfs);
 	return err;
 }
 
@@ -396,8 +396,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
 	int i;
 	int j;
 
-	spin_lock_bh(&priv->fs.arfs->arfs_lock);
-	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+	spin_lock_bh(&priv->fs->arfs->arfs_lock);
+	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
 		if (!work_pending(&arfs_rule->arfs_work) &&
 		    rps_may_expire_flow(priv->netdev,
 					arfs_rule->rxq, arfs_rule->flow_id,
@@ -408,7 +408,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
 			break;
 		}
 	}
-	spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+	spin_unlock_bh(&priv->fs->arfs->arfs_lock);
 	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
 		if (arfs_rule->rule)
 			mlx5_del_flow_rules(arfs_rule->rule);
@@ -425,12 +425,12 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
 	int i;
 	int j;
 
-	spin_lock_bh(&priv->fs.arfs->arfs_lock);
-	mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+	spin_lock_bh(&priv->fs->arfs->arfs_lock);
+	mlx5e_for_each_arfs_rule(rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
 		hlist_del_init(&rule->hlist);
 		hlist_add_head(&rule->hlist, &del_list);
 	}
-	spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+	spin_unlock_bh(&priv->fs->arfs->arfs_lock);
 
 	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
 		cancel_work_sync(&rule->arfs_work);
@@ -474,7 +474,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
 static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
 					      struct arfs_rule *arfs_rule)
 {
-	struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+	struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
 	struct arfs_tuple *tuple = &arfs_rule->tuple;
 	struct mlx5_flow_handle *rule = NULL;
 	struct mlx5_flow_destination dest = {};
@@ -592,9 +592,9 @@ static void arfs_handle_work(struct work_struct *work)
 
 	mutex_lock(&priv->state_lock);
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-		spin_lock_bh(&priv->fs.arfs->arfs_lock);
+		spin_lock_bh(&priv->fs->arfs->arfs_lock);
 		hlist_del(&arfs_rule->hlist);
-		spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+		spin_unlock_bh(&priv->fs->arfs->arfs_lock);
 
 		mutex_unlock(&priv->state_lock);
 		kfree(arfs_rule);
@@ -647,7 +647,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 	tuple->dst_port = fk->ports.dst;
 
 	rule->flow_id = flow_id;
-	rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER;
+	rule->filter_id = priv->fs->arfs->last_filter_id++ % RPS_NO_FILTER;
 
 	hlist_add_head(&rule->hlist,
 		       arfs_hash_bucket(arfs_t, tuple->src_port,
@@ -691,7 +691,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 			u16 rxq_index, u32 flow_id)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+	struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
 	struct arfs_table *arfs_t;
 	struct arfs_rule *arfs_rule;
 	struct flow_keys fk;
@@ -725,7 +725,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 			return -ENOMEM;
 		}
 	}
-	queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work);
+	queue_work(priv->fs->arfs->wq, &arfs_rule->arfs_work);
 	spin_unlock_bh(&arfs->arfs_lock);
 	return arfs_rule->filter_id;
 }
@@ -144,7 +144,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
 	int i;
 
 	list_size = 0;
-	for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID)
+	for_each_set_bit(vlan, priv->fs->vlan->active_cvlans, VLAN_N_VID)
 		list_size++;
 
 	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -161,7 +161,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
 		return -ENOMEM;
 
 	i = 0;
-	for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
+	for_each_set_bit(vlan, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
 		if (i >= list_size)
 			break;
 		vlans[i++] = vlan;
@@ -188,14 +188,14 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 				 enum mlx5e_vlan_rule_type rule_type,
 				 u16 vid, struct mlx5_flow_spec *spec)
 {
-	struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+	struct mlx5_flow_table *ft = priv->fs->vlan->ft.t;
 	struct mlx5_flow_destination dest = {};
 	struct mlx5_flow_handle **rule_p;
 	MLX5_DECLARE_FLOW_ACT(flow_act);
 	int err = 0;
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = priv->fs.l2.ft.t;
+	dest.ft = priv->fs->l2.ft.t;
 
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 
@@ -205,24 +205,24 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 		 * disabled in match value means both S & C tags
 		 * don't exist (untagged of both)
 		 */
-		rule_p = &priv->fs.vlan->untagged_rule;
+		rule_p = &priv->fs->vlan->untagged_rule;
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.cvlan_tag);
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
-		rule_p = &priv->fs.vlan->any_cvlan_rule;
+		rule_p = &priv->fs->vlan->any_cvlan_rule;
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.cvlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
-		rule_p = &priv->fs.vlan->any_svlan_rule;
+		rule_p = &priv->fs->vlan->any_svlan_rule;
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.svlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
 		break;
 	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
-		rule_p = &priv->fs.vlan->active_svlans_rule[vid];
+		rule_p = &priv->fs->vlan->active_svlans_rule[vid];
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.svlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
@@ -232,7 +232,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 			 vid);
 		break;
 	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
-		rule_p = &priv->fs.vlan->active_cvlans_rule[vid];
+		rule_p = &priv->fs->vlan->active_cvlans_rule[vid];
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.cvlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -282,33 +282,33 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
 {
 	switch (rule_type) {
 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
-		if (priv->fs.vlan->untagged_rule) {
-			mlx5_del_flow_rules(priv->fs.vlan->untagged_rule);
-			priv->fs.vlan->untagged_rule = NULL;
+		if (priv->fs->vlan->untagged_rule) {
+			mlx5_del_flow_rules(priv->fs->vlan->untagged_rule);
+			priv->fs->vlan->untagged_rule = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
-		if (priv->fs.vlan->any_cvlan_rule) {
-			mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule);
-			priv->fs.vlan->any_cvlan_rule = NULL;
+		if (priv->fs->vlan->any_cvlan_rule) {
+			mlx5_del_flow_rules(priv->fs->vlan->any_cvlan_rule);
+			priv->fs->vlan->any_cvlan_rule = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
-		if (priv->fs.vlan->any_svlan_rule) {
-			mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule);
-			priv->fs.vlan->any_svlan_rule = NULL;
+		if (priv->fs->vlan->any_svlan_rule) {
+			mlx5_del_flow_rules(priv->fs->vlan->any_svlan_rule);
+			priv->fs->vlan->any_svlan_rule = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
-		if (priv->fs.vlan->active_svlans_rule[vid]) {
-			mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]);
-			priv->fs.vlan->active_svlans_rule[vid] = NULL;
+		if (priv->fs->vlan->active_svlans_rule[vid]) {
+			mlx5_del_flow_rules(priv->fs->vlan->active_svlans_rule[vid]);
+			priv->fs->vlan->active_svlans_rule[vid] = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
-		if (priv->fs.vlan->active_cvlans_rule[vid]) {
-			mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]);
-			priv->fs.vlan->active_cvlans_rule[vid] = NULL;
+		if (priv->fs->vlan->active_cvlans_rule[vid]) {
+			mlx5_del_flow_rules(priv->fs->vlan->active_cvlans_rule[vid]);
+			priv->fs->vlan->active_cvlans_rule[vid] = NULL;
 		}
 		mlx5e_vport_context_update_vlans(priv);
 		break;
@@ -355,62 +355,62 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
 
 int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
 {
-	struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+	struct mlx5_flow_table *ft = priv->fs->vlan->ft.t;
 	struct mlx5_flow_handle *rule;
 	int err;
 
 	rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
-		priv->fs.vlan->trap_rule = NULL;
+		priv->fs->vlan->trap_rule = NULL;
 		netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
 			   __func__, err);
 		return err;
 	}
-	priv->fs.vlan->trap_rule = rule;
+	priv->fs->vlan->trap_rule = rule;
 	return 0;
 }
 
 void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
 {
-	if (priv->fs.vlan->trap_rule) {
-		mlx5_del_flow_rules(priv->fs.vlan->trap_rule);
-		priv->fs.vlan->trap_rule = NULL;
+	if (priv->fs->vlan->trap_rule) {
+		mlx5_del_flow_rules(priv->fs->vlan->trap_rule);
+		priv->fs->vlan->trap_rule = NULL;
 	}
 }
 
 int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
 {
-	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+	struct mlx5_flow_table *ft = priv->fs->l2.ft.t;
 	struct mlx5_flow_handle *rule;
 	int err;
 
 	rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
-		priv->fs.l2.trap_rule = NULL;
+		priv->fs->l2.trap_rule = NULL;
 		netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
 			   __func__, err);
 		return err;
 	}
-	priv->fs.l2.trap_rule = rule;
+	priv->fs->l2.trap_rule = rule;
 	return 0;
 }
 
 void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
 {
-	if (priv->fs.l2.trap_rule) {
-		mlx5_del_flow_rules(priv->fs.l2.trap_rule);
-		priv->fs.l2.trap_rule = NULL;
+	if (priv->fs->l2.trap_rule) {
+		mlx5_del_flow_rules(priv->fs->l2.trap_rule);
+		priv->fs->l2.trap_rule = NULL;
 	}
 }
 
 void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
 {
-	if (!priv->fs.vlan->cvlan_filter_disabled)
+	if (!priv->fs->vlan->cvlan_filter_disabled)
 		return;
 
-	priv->fs.vlan->cvlan_filter_disabled = false;
+	priv->fs->vlan->cvlan_filter_disabled = false;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
@@ -418,10 +418,10 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
 
 void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
 {
-	if (priv->fs.vlan->cvlan_filter_disabled)
+	if (priv->fs->vlan->cvlan_filter_disabled)
 		return;
 
-	priv->fs.vlan->cvlan_filter_disabled = true;
+	priv->fs->vlan->cvlan_filter_disabled = true;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
 	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
@@ -431,11 +431,11 @@ static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
 {
 	int err;
 
-	set_bit(vid, priv->fs.vlan->active_cvlans);
+	set_bit(vid, priv->fs->vlan->active_cvlans);
 
 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
 	if (err)
-		clear_bit(vid, priv->fs.vlan->active_cvlans);
+		clear_bit(vid, priv->fs->vlan->active_cvlans);
 
 	return err;
 }
@@ -445,11 +445,11 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
 	struct net_device *netdev = priv->netdev;
 	int err;
 
-	set_bit(vid, priv->fs.vlan->active_svlans);
+	set_bit(vid, priv->fs->vlan->active_svlans);
 
 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
 	if (err) {
-		clear_bit(vid, priv->fs.vlan->active_svlans);
+		clear_bit(vid, priv->fs->vlan->active_svlans);
 		return err;
 	}
 
@@ -481,10 +481,10 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 		return 0; /* no vlan table for uplink rep */
 
 	if (be16_to_cpu(proto) == ETH_P_8021Q) {
-		clear_bit(vid, priv->fs.vlan->active_cvlans);
+		clear_bit(vid, priv->fs->vlan->active_cvlans);
 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
 	} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
-		clear_bit(vid, priv->fs.vlan->active_svlans);
+		clear_bit(vid, priv->fs->vlan->active_svlans);
 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
 		netdev_update_features(dev);
 	}
@@ -498,14 +498,14 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
 
 	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 
-	for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
+	for_each_set_bit(i, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
 		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
 	}
 
-	for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
+	for_each_set_bit(i, priv->fs->vlan->active_svlans, VLAN_N_VID)
 		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
 
-	if (priv->fs.vlan->cvlan_filter_disabled)
+	if (priv->fs->vlan->cvlan_filter_disabled)
 		mlx5e_add_any_vid_rules(priv);
 }
 
@@ -515,11 +515,11 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
 
 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 
-	for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
+	for_each_set_bit(i, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
 	}
 
-	for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
+	for_each_set_bit(i, priv->fs->vlan->active_svlans, VLAN_N_VID)
 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
 
 	WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
@@ -529,7 +529,7 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
 	/* must be called after DESTROY bit is set and
 	 * set_rx_mode is called and flushed
 	 */
-	if (priv->fs.vlan->cvlan_filter_disabled)
+	if (priv->fs->vlan->cvlan_filter_disabled)
 		mlx5e_del_any_vid_rules(priv);
 }
 
@@ -576,14 +576,14 @@ static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
 
 	netif_addr_lock_bh(netdev);
 
-	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
+	mlx5e_add_l2_to_hash(priv->fs->l2.netdev_uc,
 			     priv->netdev->dev_addr);
 
 	netdev_for_each_uc_addr(ha, netdev)
-		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
+		mlx5e_add_l2_to_hash(priv->fs->l2.netdev_uc, ha->addr);
 
 	netdev_for_each_mc_addr(ha, netdev)
-		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
+		mlx5e_add_l2_to_hash(priv->fs->l2.netdev_mc, ha->addr);
 
 	netif_addr_unlock_bh(netdev);
 }
@@ -599,11 +599,11 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
 	int i = 0;
 	int hi;
 
-	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+	addr_list = is_uc ? priv->fs->l2.netdev_uc : priv->fs->l2.netdev_mc;
 
 	if (is_uc) /* Make sure our own address is pushed first */
 		ether_addr_copy(addr_array[i++], ndev->dev_addr);
-	else if (priv->fs.l2.broadcast_enabled)
+	else if (priv->fs->l2.broadcast_enabled)
 		ether_addr_copy(addr_array[i++], ndev->broadcast);
 
 	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
@@ -628,12 +628,12 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
 	int err;
 	int hi;
 
-	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
+	size = is_uc ? 0 : (priv->fs->l2.broadcast_enabled ? 1 : 0);
 	max_size = is_uc ?
 		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
 		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
 
-	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+	addr_list = is_uc ? priv->fs->l2.netdev_uc : priv->fs->l2.netdev_mc;
 	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
 		size++;
 
@@ -664,7 +664,7 @@ out:
 
 static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
 {
-	struct mlx5e_l2_table *ea = &priv->fs.l2;
+	struct mlx5e_l2_table *ea = &priv->fs->l2;
 
 	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
 	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
@@ -679,10 +679,10 @@ static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
 	struct hlist_node *tmp;
 	int i;
 
-	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs->l2.netdev_uc, i)
 		mlx5e_execute_l2_action(priv, hn);
 
-	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs->l2.netdev_mc, i)
 		mlx5e_execute_l2_action(priv, hn);
 }
 
@@ -692,9 +692,9 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
 	struct hlist_node *tmp;
 	int i;
 
-	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs->l2.netdev_uc, i)
 		hn->action = MLX5E_ACTION_DEL;
-	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs->l2.netdev_mc, i)
 		hn->action = MLX5E_ACTION_DEL;
 
 	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
@ -708,7 +708,7 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
|
||||||
|
|
||||||
static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
|
static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
|
||||||
{
|
{
|
||||||
struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
|
struct mlx5_flow_table *ft = priv->fs->promisc.ft.t;
|
||||||
struct mlx5_flow_destination dest = {};
|
struct mlx5_flow_destination dest = {};
|
||||||
struct mlx5_flow_handle **rule_p;
|
struct mlx5_flow_handle **rule_p;
|
||||||
MLX5_DECLARE_FLOW_ACT(flow_act);
|
MLX5_DECLARE_FLOW_ACT(flow_act);
|
||||||
|
@ -719,9 +719,9 @@ static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
|
||||||
if (!spec)
|
if (!spec)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
|
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
|
||||||
dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
|
dest.ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
|
||||||
|
|
||||||
rule_p = &priv->fs.promisc.rule;
|
rule_p = &priv->fs->promisc.rule;
|
||||||
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
|
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
|
||||||
if (IS_ERR(*rule_p)) {
|
if (IS_ERR(*rule_p)) {
|
||||||
err = PTR_ERR(*rule_p);
|
err = PTR_ERR(*rule_p);
|
||||||
|
@ -734,7 +734,7 @@ static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
|
||||||
|
|
||||||
static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
|
static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
|
||||||
{
|
{
|
||||||
struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
|
struct mlx5e_flow_table *ft = &priv->fs->promisc.ft;
|
||||||
struct mlx5_flow_table_attr ft_attr = {};
|
struct mlx5_flow_table_attr ft_attr = {};
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
|
@ -743,7 +743,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
|
||||||
ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
|
ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
|
||||||
ft_attr.prio = MLX5E_NIC_PRIO;
|
ft_attr.prio = MLX5E_NIC_PRIO;
|
||||||
|
|
||||||
ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
|
ft->t = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
|
||||||
if (IS_ERR(ft->t)) {
|
if (IS_ERR(ft->t)) {
|
||||||
err = PTR_ERR(ft->t);
|
err = PTR_ERR(ft->t);
|
||||||
netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
|
netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
|
||||||
|
@ -765,19 +765,19 @@ err_destroy_promisc_table:
|
||||||
|
|
||||||
static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
|
static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
|
||||||
{
|
{
|
||||||
if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule"))
|
if (WARN(!priv->fs->promisc.rule, "Trying to remove non-existing promiscuous rule"))
|
||||||
return;
|
return;
|
||||||
mlx5_del_flow_rules(priv->fs.promisc.rule);
|
mlx5_del_flow_rules(priv->fs->promisc.rule);
|
||||||
priv->fs.promisc.rule = NULL;
|
priv->fs->promisc.rule = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
|
static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
|
||||||
{
|
{
|
||||||
if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table"))
|
if (WARN(!priv->fs->promisc.ft.t, "Trying to remove non-existing promiscuous table"))
|
||||||
return;
|
return;
|
||||||
mlx5e_del_promisc_rule(priv);
|
mlx5e_del_promisc_rule(priv);
|
||||||
mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
|
mlx5_destroy_flow_table(priv->fs->promisc.ft.t);
|
||||||
priv->fs.promisc.ft.t = NULL;
|
priv->fs->promisc.ft.t = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
void mlx5e_set_rx_mode_work(struct work_struct *work)
|
void mlx5e_set_rx_mode_work(struct work_struct *work)
|
||||||
|
@ -785,7 +785,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
|
||||||
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
|
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
|
||||||
set_rx_mode_work);
|
set_rx_mode_work);
|
||||||
|
|
||||||
struct mlx5e_l2_table *ea = &priv->fs.l2;
|
struct mlx5e_l2_table *ea = &priv->fs->l2;
|
||||||
struct net_device *ndev = priv->netdev;
|
struct net_device *ndev = priv->netdev;
|
||||||
|
|
||||||
bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
|
bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
|
||||||
|
@ -844,7 +844,7 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
|
||||||
|
|
||||||
void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
|
void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
|
||||||
{
|
{
|
||||||
ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
|
ether_addr_copy(priv->fs->l2.broadcast.addr, priv->netdev->broadcast);
|
||||||
}
|
}
|
||||||
|
|
||||||
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
|
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
|
||||||
|
@ -906,7 +906,7 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
|
||||||
ttc_params->tunnel_dests[tt].type =
|
ttc_params->tunnel_dests[tt].type =
|
||||||
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
|
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
|
||||||
ttc_params->tunnel_dests[tt].ft =
|
ttc_params->tunnel_dests[tt].ft =
|
||||||
mlx5_get_ttc_flow_table(priv->fs.inner_ttc);
|
mlx5_get_ttc_flow_table(priv->fs->inner_ttc);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -922,7 +922,7 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
|
||||||
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
|
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
|
||||||
struct mlx5e_l2_rule *ai, int type)
|
struct mlx5e_l2_rule *ai, int type)
|
||||||
{
|
{
|
||||||
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
|
struct mlx5_flow_table *ft = priv->fs->l2.ft.t;
|
||||||
struct mlx5_flow_destination dest = {};
|
struct mlx5_flow_destination dest = {};
|
||||||
MLX5_DECLARE_FLOW_ACT(flow_act);
|
MLX5_DECLARE_FLOW_ACT(flow_act);
|
||||||
struct mlx5_flow_spec *spec;
|
struct mlx5_flow_spec *spec;
|
||||||
|
@ -940,7 +940,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
|
||||||
outer_headers.dmac_47_16);
|
outer_headers.dmac_47_16);
|
||||||
|
|
||||||
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
|
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
|
||||||
dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
|
dest.ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
|
||||||
|
|
||||||
switch (type) {
|
switch (type) {
|
||||||
case MLX5E_FULLMATCH:
|
case MLX5E_FULLMATCH:
|
||||||
|
@ -1045,12 +1045,12 @@ err_destroy_groups:
|
||||||
|
|
||||||
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
|
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
|
||||||
{
|
{
|
||||||
mlx5e_destroy_flow_table(&priv->fs.l2.ft);
|
mlx5e_destroy_flow_table(&priv->fs->l2.ft);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
|
static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
|
||||||
{
|
{
|
||||||
struct mlx5e_l2_table *l2_table = &priv->fs.l2;
|
struct mlx5e_l2_table *l2_table = &priv->fs->l2;
|
||||||
struct mlx5e_flow_table *ft = &l2_table->ft;
|
struct mlx5e_flow_table *ft = &l2_table->ft;
|
||||||
struct mlx5_flow_table_attr ft_attr = {};
|
struct mlx5_flow_table_attr ft_attr = {};
|
||||||
int err;
|
int err;
|
||||||
|
@ -1061,7 +1061,7 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
|
||||||
ft_attr.level = MLX5E_L2_FT_LEVEL;
|
ft_attr.level = MLX5E_L2_FT_LEVEL;
|
||||||
ft_attr.prio = MLX5E_NIC_PRIO;
|
ft_attr.prio = MLX5E_NIC_PRIO;
|
||||||
|
|
||||||
ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
|
ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
|
||||||
if (IS_ERR(ft->t)) {
|
if (IS_ERR(ft->t)) {
|
||||||
err = PTR_ERR(ft->t);
|
err = PTR_ERR(ft->t);
|
||||||
ft->t = NULL;
|
ft->t = NULL;
|
||||||
|
@ -1187,14 +1187,14 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
|
||||||
struct mlx5e_flow_table *ft;
|
struct mlx5e_flow_table *ft;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
ft = &priv->fs.vlan->ft;
|
ft = &priv->fs->vlan->ft;
|
||||||
ft->num_groups = 0;
|
ft->num_groups = 0;
|
||||||
|
|
||||||
ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
|
ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
|
||||||
ft_attr.level = MLX5E_VLAN_FT_LEVEL;
|
ft_attr.level = MLX5E_VLAN_FT_LEVEL;
|
||||||
ft_attr.prio = MLX5E_NIC_PRIO;
|
ft_attr.prio = MLX5E_NIC_PRIO;
|
||||||
|
|
||||||
ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
|
ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
|
||||||
if (IS_ERR(ft->t))
|
if (IS_ERR(ft->t))
|
||||||
return PTR_ERR(ft->t);
|
return PTR_ERR(ft->t);
|
||||||
|
|
||||||
|
@ -1223,19 +1223,19 @@ err_destroy_vlan_table:
|
||||||
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
|
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
|
||||||
{
|
{
|
||||||
mlx5e_del_vlan_rules(priv);
|
mlx5e_del_vlan_rules(priv);
|
||||||
mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
|
mlx5e_destroy_flow_table(&priv->fs->vlan->ft);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
|
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
|
||||||
{
|
{
|
||||||
if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
|
if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
|
||||||
return;
|
return;
|
||||||
mlx5_destroy_ttc_table(priv->fs.inner_ttc);
|
mlx5_destroy_ttc_table(priv->fs->inner_ttc);
|
||||||
}
|
}
|
||||||
|
|
||||||
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
|
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
|
||||||
{
|
{
|
||||||
mlx5_destroy_ttc_table(priv->fs.ttc);
|
mlx5_destroy_ttc_table(priv->fs->ttc);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
|
static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
|
||||||
|
@ -1246,10 +1246,10 @@ static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
mlx5e_set_inner_ttc_params(priv, &ttc_params);
|
mlx5e_set_inner_ttc_params(priv, &ttc_params);
|
||||||
priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev,
|
priv->fs->inner_ttc = mlx5_create_inner_ttc_table(priv->mdev,
|
||||||
&ttc_params);
|
&ttc_params);
|
||||||
if (IS_ERR(priv->fs.inner_ttc))
|
if (IS_ERR(priv->fs->inner_ttc))
|
||||||
return PTR_ERR(priv->fs.inner_ttc);
|
return PTR_ERR(priv->fs->inner_ttc);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1258,9 +1258,9 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
|
||||||
struct ttc_params ttc_params = {};
|
struct ttc_params ttc_params = {};
|
||||||
|
|
||||||
mlx5e_set_ttc_params(priv, &ttc_params, true);
|
mlx5e_set_ttc_params(priv, &ttc_params, true);
|
||||||
priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
|
priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
|
||||||
if (IS_ERR(priv->fs.ttc))
|
if (IS_ERR(priv->fs->ttc))
|
||||||
return PTR_ERR(priv->fs.ttc);
|
return PTR_ERR(priv->fs->ttc);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1268,10 +1268,10 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
|
||||||
{
|
{
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
|
priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
|
||||||
MLX5_FLOW_NAMESPACE_KERNEL);
|
MLX5_FLOW_NAMESPACE_KERNEL);
|
||||||
|
|
||||||
if (!priv->fs.ns)
|
if (!priv->fs->ns)
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
err = mlx5e_arfs_create_tables(priv);
|
err = mlx5e_arfs_create_tables(priv);
|
||||||
|
@@ -1369,31 +1369,39 @@ static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs)
mlx5e_tc_table_free(fs->tc);
}

-int mlx5e_fs_init(struct mlx5e_priv *priv)
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile)
{
+struct mlx5e_flow_steering *fs;
int err;

-if (mlx5e_profile_feature_cap(priv->profile, FS_VLAN)) {
-err = mlx5e_fs_vlan_alloc(&priv->fs);
-if (err)
+fs = kvzalloc(sizeof(*fs), GFP_KERNEL);
+if (!fs)
goto err;
+
+if (mlx5e_profile_feature_cap(profile, FS_VLAN)) {
+err = mlx5e_fs_vlan_alloc(fs);
+if (err)
+goto err_free_fs;
}

-if (mlx5e_profile_feature_cap(priv->profile, FS_TC)) {
-err = mlx5e_fs_tc_alloc(&priv->fs);
+if (mlx5e_profile_feature_cap(profile, FS_TC)) {
+err = mlx5e_fs_tc_alloc(fs);
if (err)
goto err_free_vlan;
}

-return 0;
+return fs;
+err_free_fs:
+kvfree(fs);
err_free_vlan:
-mlx5e_fs_vlan_free(&priv->fs);
+mlx5e_fs_vlan_free(fs);
err:
-return -ENOMEM;
+return NULL;
}

-void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
-mlx5e_fs_tc_free(&priv->fs);
-mlx5e_fs_vlan_free(&priv->fs);
+mlx5e_fs_tc_free(fs);
+mlx5e_fs_vlan_free(fs);
+kvfree(fs);
}
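Pulling the new-side lines of this hunk together: the flow steering container is now allocated with kvzalloc() and released with kvfree(), and the VLAN/TC sub-state is only allocated for profiles that advertise the matching feature bit. The compact sketch below reuses the helper names from the hunk but simplifies the error-label layout, so treat it as illustrative rather than a verbatim copy of the new function.

struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile)
{
	struct mlx5e_flow_steering *fs;
	int err;

	fs = kvzalloc(sizeof(*fs), GFP_KERNEL);	/* container moved off mlx5e_priv onto the heap */
	if (!fs)
		return NULL;

	if (mlx5e_profile_feature_cap(profile, FS_VLAN)) {
		err = mlx5e_fs_vlan_alloc(fs);	/* per-profile VLAN steering state */
		if (err)
			goto err_free_fs;
	}

	if (mlx5e_profile_feature_cap(profile, FS_TC)) {
		err = mlx5e_fs_tc_alloc(fs);	/* per-profile TC steering state */
		if (err)
			goto err_free_vlan;
	}

	return fs;

err_free_vlan:
	mlx5e_fs_vlan_free(fs);
err_free_fs:
	kvfree(fs);
	return NULL;
}

void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
	mlx5e_fs_tc_free(fs);	/* symmetric teardown of the sub-state ... */
	mlx5e_fs_vlan_free(fs);
	kvfree(fs);		/* ... and of the container itself */
}

The key consequence is that mlx5e_fs_cleanup() now owns freeing the container, so every profile that calls mlx5e_fs_init() must pair it with mlx5e_fs_cleanup().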
@@ -81,18 +81,18 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
-eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
break;
case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
-eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
break;
case ETHER_FLOW:
max_tuples = ETHTOOL_NUM_L2_FTS;
prio = max_tuples - num_tuples;
-eth_ft = &priv->fs.ethtool.l2_ft[prio];
+eth_ft = &priv->fs->ethtool.l2_ft[prio];
prio += MLX5E_ETHTOOL_L2_PRIO;
break;
default:

@@ -383,14 +383,14 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *rule)
{
struct mlx5e_ethtool_rule *iter;
-struct list_head *head = &priv->fs.ethtool.rules;
+struct list_head *head = &priv->fs->ethtool.rules;

-list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
if (iter->flow_spec.location > rule->flow_spec.location)
break;
head = &iter->list;
}
-priv->fs.ethtool.tot_num_rules++;
+priv->fs->ethtool.tot_num_rules++;
list_add(&rule->list, head);
}

@@ -507,7 +507,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
if (eth_rule->rss)
mlx5e_rss_refcnt_dec(eth_rule->rss);
list_del(&eth_rule->list);
-priv->fs.ethtool.tot_num_rules--;
+priv->fs->ethtool.tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
kfree(eth_rule);
}

@@ -517,7 +517,7 @@ static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
{
struct mlx5e_ethtool_rule *iter;

-list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
if (iter->flow_spec.location == location)
return iter;
}

@@ -788,7 +788,7 @@ mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;

-list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
+list_for_each_entry(eth_rule, &priv->fs->ethtool.rules, list) {
int index;

if (eth_rule->flow_spec.location != location)

@@ -831,13 +831,13 @@ void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
struct mlx5e_ethtool_rule *iter;
struct mlx5e_ethtool_rule *temp;

-list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
+list_for_each_entry_safe(iter, temp, &priv->fs->ethtool.rules, list)
del_ethtool_rule(priv, iter);
}

void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
-INIT_LIST_HEAD(&priv->fs.ethtool.rules);
+INIT_LIST_HEAD(&priv->fs->ethtool.rules);
}

static int flow_type_to_traffic_type(u32 flow_type)

@@ -963,7 +963,7 @@ int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,

switch (info->cmd) {
case ETHTOOL_GRXCLSRLCNT:
-info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+info->rule_cnt = priv->fs->ethtool.tot_num_rules;
break;
case ETHTOOL_GRXCLSRULE:
err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
@@ -3888,8 +3888,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,

mutex_lock(&priv->state_lock);
params = &priv->channels.params;
-if (!priv->fs.vlan ||
-!bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) {
+if (!priv->fs->vlan ||
+!bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs->vlan), VLAN_N_VID)) {
/* HW strips the outer C-tag header, this is a problem
 * for S-tag traffic.
 */

@@ -5012,6 +5012,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+struct mlx5e_flow_steering *fs;
int err;

mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);

@@ -5019,11 +5020,13 @@

mlx5e_timestamp_init(priv);

-err = mlx5e_fs_init(priv);
-if (err) {
+fs = mlx5e_fs_init(priv->profile);
+if (!fs) {
+err = -ENOMEM;
mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
return err;
}
+priv->fs = fs;

err = mlx5e_ipsec_init(priv);
if (err)

@@ -5042,7 +5045,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
-mlx5e_fs_cleanup(priv);
+mlx5e_fs_cleanup(priv->fs);
}

static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
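On the caller side, the NIC profile hunks above show the pattern every profile now follows: obtain the object from mlx5e_fs_init(), translate a NULL return into -ENOMEM (the helper no longer returns an errno itself), park the pointer in priv->fs, and hand it back to mlx5e_fs_cleanup() at profile cleanup. A hedged sketch follows, with hypothetical function names standing in for mlx5e_nic_init()/mlx5e_nic_cleanup().

/* Hypothetical names; mirrors the init/cleanup hunks above. */
static int example_profile_init(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
{
	struct mlx5e_flow_steering *fs;

	fs = mlx5e_fs_init(priv->profile);	/* allocate steering state for this profile */
	if (!fs) {
		mlx5_core_err(mdev, "FS initialization failed, %d\n", -ENOMEM);
		return -ENOMEM;			/* the caller picks the errno now */
	}
	priv->fs = fs;				/* mlx5e_priv only keeps a pointer */

	return 0;
}

static void example_profile_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_fs_cleanup(priv->fs);		/* symmetric deallocation at profile cleanup */
}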
@@ -718,6 +718,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
+mlx5e_fs_cleanup(priv->fs);
mlx5e_ipsec_cleanup(priv);
}

@@ -728,7 +729,7 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
int err;

-priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);

/* The inner_ttc in the ttc params is intentionally not set */

@@ -738,9 +739,9 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
/* To give uplik rep TTC a lower level for chaining from root ft */
ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;

-priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
-if (IS_ERR(priv->fs.ttc)) {
-err = PTR_ERR(priv->fs.ttc);
+priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
+if (IS_ERR(priv->fs->ttc)) {
+err = PTR_ERR(priv->fs->ttc);
netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
err);
return err;

@@ -760,7 +761,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
/* non uplik reps will skip any bypass tables and go directly to
 * their own ttc
 */
-rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
return 0;
}

@@ -835,9 +836,17 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;

-priv->rx_res = mlx5e_rx_res_alloc();
-if (!priv->rx_res)
+priv->fs = mlx5e_fs_init(priv->profile);
+if (!priv->fs) {
+netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
+}
+
+priv->rx_res = mlx5e_rx_res_alloc();
+if (!priv->rx_res) {
+err = -ENOMEM;
+goto err_free_fs;
+}

mlx5e_init_l2_addr(priv);

@@ -873,13 +882,15 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
err_destroy_root_ft:
mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
-mlx5_destroy_ttc_table(priv->fs.ttc);
+mlx5_destroy_ttc_table(priv->fs->ttc);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+err_free_fs:
+mlx5e_fs_cleanup(priv->fs);
return err;
}

@@ -888,7 +899,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
-mlx5_destroy_ttc_table(priv->fs.ttc);
+mlx5_destroy_ttc_table(priv->fs->ttc);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
@@ -322,7 +322,7 @@ get_ct_priv(struct mlx5e_priv *priv)
return uplink_priv->ct_priv;
}

-return priv->fs.tc->ct;
+return priv->fs->tc->ct;
}

static struct mlx5e_tc_psample *

@@ -356,7 +356,7 @@ get_post_action(struct mlx5e_priv *priv)
return uplink_priv->post_act;
}

-return priv->fs.tc->post_act;
+return priv->fs->tc->post_act;
}

struct mlx5_flow_handle *

@@ -611,7 +611,7 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)

return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
-&priv->fs.tc->mod_hdr;
+&priv->fs->tc->mod_hdr;
}

static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,

@@ -829,7 +829,7 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)

netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
hp->num_channels,
-mlx5_get_ttc_flow_table(priv->fs.ttc)->id);
+mlx5_get_ttc_flow_table(priv->fs->ttc)->id);

return 0;

@@ -919,7 +919,7 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe;
u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

-hash_for_each_possible(priv->fs.tc->hairpin_tbl, hpe,
+hash_for_each_possible(priv->fs->tc->hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
refcount_inc(&hpe->refcnt);

@@ -934,10 +934,10 @@ static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe)
{
/* no more hairpin flows for us, release the hairpin pair */
-if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc->hairpin_tbl_lock))
+if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs->tc->hairpin_tbl_lock))
return;
hash_del(&hpe->hairpin_hlist);
-mutex_unlock(&priv->fs.tc->hairpin_tbl_lock);
+mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);

if (!IS_ERR_OR_NULL(hpe->hp)) {
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",

@@ -1021,10 +1021,10 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
if (err)
return err;

-mutex_lock(&priv->fs.tc->hairpin_tbl_lock);
+mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
if (hpe) {
-mutex_unlock(&priv->fs.tc->hairpin_tbl_lock);
+mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
wait_for_completion(&hpe->res_ready);

if (IS_ERR(hpe->hp)) {

@@ -1036,7 +1036,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,

hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
if (!hpe) {
-mutex_unlock(&priv->fs.tc->hairpin_tbl_lock);
+mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
return -ENOMEM;
}

@@ -1048,9 +1048,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
refcount_set(&hpe->refcnt, 1);
init_completion(&hpe->res_ready);

-hash_add(priv->fs.tc->hairpin_tbl, &hpe->hairpin_hlist,
+hash_add(priv->fs->tc->hairpin_tbl, &hpe->hairpin_hlist,
hash_hairpin_info(peer_id, match_prio));
-mutex_unlock(&priv->fs.tc->hairpin_tbl_lock);
+mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);

params.log_data_size = 16;
params.log_data_size = min_t(u8, params.log_data_size,

@@ -1127,7 +1127,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
-struct mlx5e_tc_table *tc = priv->fs.tc;
+struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_fs_chains *nic_chains;
struct mlx5_flow_act flow_act = {

@@ -1163,7 +1163,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (IS_ERR(dest[dest_ix].ft))
return ERR_CAST(dest[dest_ix].ft);
} else {
-dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs->vlan);
}
dest_ix++;
}

@@ -1191,7 +1191,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
mutex_unlock(&tc->t_lock);
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
-rule = ERR_CAST(priv->fs.tc->t);
+rule = ERR_CAST(priv->fs->tc->t);
goto err_ft_get;
}
}

@@ -1293,7 +1293,7 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
-struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv->fs.tc);
+struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv->fs->tc);

mlx5_del_flow_rules(rule);

@@ -1310,7 +1310,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_flow_attr *attr = flow->attr;
-struct mlx5e_tc_table *tc = priv->fs.tc;
+struct mlx5e_tc_table *tc = priv->fs->tc;

flow_flag_clear(flow, OFFLOADED);

@@ -1322,13 +1322,13 @@
/* Remove root table if no rules are left to avoid
 * extra steering hops.
 */
-mutex_lock(&priv->fs.tc->t_lock);
+mutex_lock(&priv->fs->tc->t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
-priv->fs.tc->t = NULL;
+priv->fs->tc->t = NULL;
}
-mutex_unlock(&priv->fs.tc->t_lock);
+mutex_unlock(&priv->fs->tc->t_lock);

if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);

@@ -4064,7 +4064,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
rpriv = priv->ppriv;
return &rpriv->tc_ht;
} else /* NIC offload */
-return &priv->fs.tc->ht;
+return &priv->fs->tc->ht;
}

static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)

@@ -4783,11 +4783,11 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,

peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

-mutex_lock(&priv->fs.tc->hairpin_tbl_lock);
-hash_for_each(priv->fs.tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
+mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
+hash_for_each(priv->fs->tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
if (refcount_inc_not_zero(&hpe->refcnt))
list_add(&hpe->dead_peer_wait_list, &init_wait_list);
-mutex_unlock(&priv->fs.tc->hairpin_tbl_lock);
+mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);

list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);

@@ -4841,7 +4841,7 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)

static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
-struct mlx5_flow_table **ft = &priv->fs.tc->miss_t;
+struct mlx5_flow_table **ft = &priv->fs->tc->miss_t;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
int err = 0;

@@ -4863,12 +4863,12 @@ static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)

static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
-mlx5_destroy_flow_table(priv->fs.tc->miss_t);
+mlx5_destroy_flow_table(priv->fs->tc->miss_t);
}

int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
-struct mlx5e_tc_table *tc = priv->fs.tc;
+struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_core_dev *dev = priv->mdev;
struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};

@@ -4909,7 +4909,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
-attr.default_ft = priv->fs.tc->miss_t;
+attr.default_ft = priv->fs->tc->miss_t;
attr.mapping = chains_mapping;

tc->chains = mlx5_chains_create(dev, &attr);

@@ -4958,7 +4958,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
-struct mlx5e_tc_table *tc = priv->fs.tc;
+struct mlx5e_tc_table *tc = priv->fs->tc;

if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier_dev_net(priv->netdev,

@@ -5163,7 +5163,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
u32 chain = 0, chain_tag, reg_b, zone_restore_id;
struct mlx5e_priv *priv = netdev_priv(skb->dev);
-struct mlx5e_tc_table *tc = priv->fs.tc;
+struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_mapped_obj mapped_obj;
struct tc_skb_ext *tc_skb_ext;
int err;
@@ -322,10 +322,10 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
int err;

-priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);

-if (!priv->fs.ns)
+if (!priv->fs->ns)
return -EINVAL;

err = mlx5e_arfs_create_tables(priv);

@@ -364,9 +364,17 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;

-priv->rx_res = mlx5e_rx_res_alloc();
-if (!priv->rx_res)
+priv->fs = mlx5e_fs_init(priv->profile);
+if (!priv->fs) {
+netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
+}
+
+priv->rx_res = mlx5e_rx_res_alloc();
+if (!priv->rx_res) {
+err = -ENOMEM;
+goto err_free_fs;
+}

mlx5e_create_q_counters(priv);

@@ -397,6 +405,8 @@ err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+err_free_fs:
+mlx5e_fs_cleanup(priv->fs);
return err;
}

@@ -408,6 +418,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+mlx5e_fs_cleanup(priv->fs);
}

/* The stats groups order is opposite to the update_stats() order calls */
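The representor and IPoIB RX-init hunks share the same ordering concern: priv->fs is now the first allocation in the RX path, so every later failure has to unwind through it. A sketch of that shape follows (hypothetical function name; the intermediate RX setup steps are elided for brevity).

static int example_init_rx(struct mlx5e_priv *priv)
{
	int err;

	priv->fs = mlx5e_fs_init(priv->profile);
	if (!priv->fs)
		return -ENOMEM;			/* nothing else to unwind yet */

	priv->rx_res = mlx5e_rx_res_alloc();
	if (!priv->rx_res) {
		err = -ENOMEM;
		goto err_free_fs;		/* later failures must release the steering state */
	}

	/* ... drop RQ, RX resources, flow tables ... */

	return 0;

err_free_fs:
	mlx5e_fs_cleanup(priv->fs);
	return err;
}

Both profiles pair this with an mlx5e_fs_cleanup(priv->fs) call on their teardown paths, as the hunks above show.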