1
0
Fork 0
mirror of synced 2025-03-06 20:59:54 +01:00

net/mlx5: DR, handle more than one peer domain

Currently, DR domain is using the assumption that each domain can only
have a single peer.
In order to support VF LAG of more than two ports, expand peer domain
to use an array of peers, and align the code accordingly.

Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
This commit is contained in:
Shay Drory 2023-02-21 10:17:06 +02:00 committed by Saeed Mahameed
parent 014e4d48ea
commit 6d5b7321d8
12 changed files with 42 additions and 30 deletions

View file

@ -2778,7 +2778,9 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
struct mlx5_eswitch *peer_esw, struct mlx5_eswitch *peer_esw,
bool pair) bool pair)
{ {
u8 peer_idx = mlx5_get_dev_index(peer_esw->dev);
struct mlx5_flow_root_namespace *peer_ns; struct mlx5_flow_root_namespace *peer_ns;
u8 idx = mlx5_get_dev_index(esw->dev);
struct mlx5_flow_root_namespace *ns; struct mlx5_flow_root_namespace *ns;
int err; int err;
@ -2786,18 +2788,18 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
ns = esw->dev->priv.steering->fdb_root_ns; ns = esw->dev->priv.steering->fdb_root_ns;
if (pair) { if (pair) {
err = mlx5_flow_namespace_set_peer(ns, peer_ns); err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_idx);
if (err) if (err)
return err; return err;
err = mlx5_flow_namespace_set_peer(peer_ns, ns); err = mlx5_flow_namespace_set_peer(peer_ns, ns, idx);
if (err) { if (err) {
mlx5_flow_namespace_set_peer(ns, NULL); mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
return err; return err;
} }
} else { } else {
mlx5_flow_namespace_set_peer(ns, NULL); mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
mlx5_flow_namespace_set_peer(peer_ns, NULL); mlx5_flow_namespace_set_peer(peer_ns, NULL, idx);
} }
return 0; return 0;

View file

@ -139,7 +139,8 @@ static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace
} }
static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns, static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns) struct mlx5_flow_root_namespace *peer_ns,
u8 peer_idx)
{ {
return 0; return 0;
} }

View file

@ -93,7 +93,8 @@ struct mlx5_flow_cmds {
struct mlx5_modify_hdr *modify_hdr); struct mlx5_modify_hdr *modify_hdr);
int (*set_peer)(struct mlx5_flow_root_namespace *ns, int (*set_peer)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns); struct mlx5_flow_root_namespace *peer_ns,
u8 peer_idx);
int (*create_ns)(struct mlx5_flow_root_namespace *ns); int (*create_ns)(struct mlx5_flow_root_namespace *ns);
int (*destroy_ns)(struct mlx5_flow_root_namespace *ns); int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);

View file

@ -3620,7 +3620,8 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
} }
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns, int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns) struct mlx5_flow_root_namespace *peer_ns,
u8 peer_idx)
{ {
if (peer_ns && ns->mode != peer_ns->mode) { if (peer_ns && ns->mode != peer_ns->mode) {
mlx5_core_err(ns->dev, mlx5_core_err(ns->dev,
@ -3628,7 +3629,7 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
return -EINVAL; return -EINVAL;
} }
return ns->cmds->set_peer(ns, peer_ns); return ns->cmds->set_peer(ns, peer_ns, peer_idx);
} }
/* This function should be called only at init stage of the namespace. /* This function should be called only at init stage of the namespace.

View file

@ -295,7 +295,8 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void); const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns, int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns); struct mlx5_flow_root_namespace *peer_ns,
u8 peer_idx);
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns, int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
enum mlx5_flow_steering_mode mode); enum mlx5_flow_steering_mode mode);

View file

@ -2071,8 +2071,9 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action; struct mlx5dr_action *action;
u8 peer_vport; u8 peer_vport;
peer_vport = vhca_id_valid && (vhca_id != dmn->info.caps.gvmi); peer_vport = vhca_id_valid && mlx5_core_is_pf(dmn->mdev) &&
vport_dmn = peer_vport ? dmn->peer_dmn : dmn; (vhca_id != dmn->info.caps.gvmi);
vport_dmn = peer_vport ? dmn->peer_dmn[vhca_id] : dmn;
if (!vport_dmn) { if (!vport_dmn) {
mlx5dr_dbg(dmn, "No peer vport domain for given vhca_id\n"); mlx5dr_dbg(dmn, "No peer vport domain for given vhca_id\n");
return NULL; return NULL;

View file

@ -555,17 +555,18 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
} }
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn, void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn) struct mlx5dr_domain *peer_dmn,
u8 peer_idx)
{ {
mlx5dr_domain_lock(dmn); mlx5dr_domain_lock(dmn);
if (dmn->peer_dmn) if (dmn->peer_dmn[peer_idx])
refcount_dec(&dmn->peer_dmn->refcount); refcount_dec(&dmn->peer_dmn[peer_idx]->refcount);
dmn->peer_dmn = peer_dmn; dmn->peer_dmn[peer_idx] = peer_dmn;
if (dmn->peer_dmn) if (dmn->peer_dmn[peer_idx])
refcount_inc(&dmn->peer_dmn->refcount); refcount_inc(&dmn->peer_dmn[peer_idx]->refcount);
mlx5dr_domain_unlock(dmn); mlx5dr_domain_unlock(dmn);
} }

View file

@ -1647,6 +1647,7 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
u8 *tag) u8 *tag)
{ {
struct mlx5dr_match_misc *misc = &value->misc; struct mlx5dr_match_misc *misc = &value->misc;
int id = misc->source_eswitch_owner_vhca_id;
struct mlx5dr_cmd_vport_cap *vport_cap; struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn; struct mlx5dr_domain *dmn = sb->dmn;
struct mlx5dr_domain *vport_dmn; struct mlx5dr_domain *vport_dmn;
@ -1657,11 +1658,11 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
if (sb->vhca_id_valid) { if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */ /* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi) if (id == dmn->info.caps.gvmi)
vport_dmn = dmn; vport_dmn = dmn;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id == else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] &&
dmn->peer_dmn->info.caps.gvmi)) (id == dmn->peer_dmn[id]->info.caps.gvmi))
vport_dmn = dmn->peer_dmn; vport_dmn = dmn->peer_dmn[id];
else else
return -EINVAL; return -EINVAL;

View file

@ -1979,6 +1979,7 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
u8 *tag) u8 *tag)
{ {
struct mlx5dr_match_misc *misc = &value->misc; struct mlx5dr_match_misc *misc = &value->misc;
int id = misc->source_eswitch_owner_vhca_id;
struct mlx5dr_cmd_vport_cap *vport_cap; struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn; struct mlx5dr_domain *dmn = sb->dmn;
struct mlx5dr_domain *vport_dmn; struct mlx5dr_domain *vport_dmn;
@ -1988,11 +1989,11 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
if (sb->vhca_id_valid) { if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */ /* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi) if (id == dmn->info.caps.gvmi)
vport_dmn = dmn; vport_dmn = dmn;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id == else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] &&
dmn->peer_dmn->info.caps.gvmi)) (id == dmn->peer_dmn[id]->info.caps.gvmi))
vport_dmn = dmn->peer_dmn; vport_dmn = dmn->peer_dmn[id];
else else
return -EINVAL; return -EINVAL;

View file

@ -935,7 +935,7 @@ struct mlx5dr_domain_info {
}; };
struct mlx5dr_domain { struct mlx5dr_domain {
struct mlx5dr_domain *peer_dmn; struct mlx5dr_domain *peer_dmn[MLX5_MAX_PORTS];
struct mlx5_core_dev *mdev; struct mlx5_core_dev *mdev;
u32 pdn; u32 pdn;
struct mlx5_uars_page *uar; struct mlx5_uars_page *uar;

View file

@ -770,14 +770,15 @@ restore_fte:
} }
static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns, static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns) struct mlx5_flow_root_namespace *peer_ns,
u8 peer_idx)
{ {
struct mlx5dr_domain *peer_domain = NULL; struct mlx5dr_domain *peer_domain = NULL;
if (peer_ns) if (peer_ns)
peer_domain = peer_ns->fs_dr_domain.dr_domain; peer_domain = peer_ns->fs_dr_domain.dr_domain;
mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain, mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
peer_domain); peer_domain, peer_idx);
return 0; return 0;
} }

View file

@ -48,7 +48,8 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *domain);
int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags); int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn, void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn); struct mlx5dr_domain *peer_dmn,
u8 peer_idx);
struct mlx5dr_table * struct mlx5dr_table *
mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags, mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags,