{net/RDMA}/mlx5: Introduce lag_for_each_peer
Introduce a generic API to iterate over all the devices which are part of the LAG. This API replaces mlx5_lag_get_peer_mdev(), which retrieves only a single peer device from the LAG.

Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
commit 222dd18583
parent 962825e534
committed by Saeed Mahameed
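For reference, a minimal usage sketch of the new iterator (the wrapper function and its name below are illustrative only and not part of this patch; mlx5_lag_for_each_peer_mdev and mlx5_lag_get_next_peer_mdev are added by the diff that follows):

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>

/* Illustrative only: sum the vports exposed by every LAG peer of @dev.
 * The iterator visits each mlx5_core_dev in the LAG except @dev itself;
 * @i is the cursor consumed by mlx5_lag_get_next_peer_mdev() underneath.
 */
static u32 example_count_peer_vports(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *peer_dev;
	u32 total = 0;
	int i;

	mlx5_lag_for_each_peer_mdev(dev, peer_dev, i)
		total += mlx5_eswitch_get_total_vports(peer_dev);

	return total;
}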
@@ -30,45 +30,65 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev,
 
 static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev);
 
+static void mlx5_ib_num_ports_update(struct mlx5_core_dev *dev, u32 *num_ports)
+{
+	struct mlx5_core_dev *peer_dev;
+	int i;
+
+	mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
+		u32 peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);
+
+		if (mlx5_lag_is_mpesw(peer_dev))
+			*num_ports += peer_num_ports;
+		else
+			/* Only 1 ib port is the representor for all uplinks */
+			*num_ports += peer_num_ports - 1;
+	}
+}
+
 static int
 mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
 	u32 num_ports = mlx5_eswitch_get_total_vports(dev);
+	struct mlx5_core_dev *lag_master = dev;
 	const struct mlx5_ib_profile *profile;
 	struct mlx5_core_dev *peer_dev;
 	struct mlx5_ib_dev *ibdev;
-	int second_uplink = false;
-	u32 peer_num_ports;
+	int new_uplink = false;
 	int vport_index;
 	int ret;
+	int i;
 
 	vport_index = rep->vport_index;
 
 	if (mlx5_lag_is_shared_fdb(dev)) {
-		peer_dev = mlx5_lag_get_peer_mdev(dev);
-		peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);
 		if (mlx5_lag_is_master(dev)) {
-			if (mlx5_lag_is_mpesw(dev))
-				num_ports += peer_num_ports;
-			else
-				num_ports += peer_num_ports - 1;
-
+			mlx5_ib_num_ports_update(dev, &num_ports);
 		} else {
 			if (rep->vport == MLX5_VPORT_UPLINK) {
 				if (!mlx5_lag_is_mpesw(dev))
 					return 0;
-				second_uplink = true;
+				new_uplink = true;
 			}
-
-			vport_index += peer_num_ports;
-			dev = peer_dev;
+			mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
+				u32 peer_n_ports = mlx5_eswitch_get_total_vports(peer_dev);
+
+				if (mlx5_lag_is_master(peer_dev))
+					lag_master = peer_dev;
+				else if (!mlx5_lag_is_mpesw(dev))
+					/* Only 1 ib port is the representor for all uplinks */
+					peer_n_ports--;
+
+				if (mlx5_get_dev_index(peer_dev) < mlx5_get_dev_index(dev))
+					vport_index += peer_n_ports;
+			}
 		}
 	}
 
-	if (rep->vport == MLX5_VPORT_UPLINK && !second_uplink)
+	if (rep->vport == MLX5_VPORT_UPLINK && !new_uplink)
 		profile = &raw_eth_profile;
 	else
-		return mlx5_ib_set_vport_rep(dev, rep, vport_index);
+		return mlx5_ib_set_vport_rep(lag_master, rep, vport_index);
 
 	ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
 	if (!ibdev)
@@ -85,8 +105,8 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	vport_index = rep->vport_index;
 	ibdev->port[vport_index].rep = rep;
 	ibdev->port[vport_index].roce.netdev =
-		mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
-	ibdev->mdev = dev;
+		mlx5_ib_get_rep_netdev(lag_master->priv.eswitch, rep->vport);
+	ibdev->mdev = lag_master;
 	ibdev->num_ports = num_ports;
 
 	ret = __mlx5_ib_add(ibdev, profile);
@@ -94,8 +114,8 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 		goto fail_add;
 
 	rep->rep_data[REP_IB].priv = ibdev;
-	if (mlx5_lag_is_shared_fdb(dev))
-		mlx5_ib_register_peer_vport_reps(dev);
+	if (mlx5_lag_is_shared_fdb(lag_master))
+		mlx5_ib_register_peer_vport_reps(lag_master);
 
 	return 0;
 
@@ -118,23 +138,27 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 	struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
 	int vport_index = rep->vport_index;
 	struct mlx5_ib_port *port;
+	int i;
 
 	if (WARN_ON(!mdev))
 		return;
 
-	if (mlx5_lag_is_shared_fdb(mdev) &&
-	    !mlx5_lag_is_master(mdev)) {
-		struct mlx5_core_dev *peer_mdev;
-
-		if (rep->vport == MLX5_VPORT_UPLINK && !mlx5_lag_is_mpesw(mdev))
-			return;
-		peer_mdev = mlx5_lag_get_peer_mdev(mdev);
-		vport_index += mlx5_eswitch_get_total_vports(peer_mdev);
-	}
-
 	if (!dev)
 		return;
 
+	if (mlx5_lag_is_shared_fdb(mdev) &&
+	    !mlx5_lag_is_master(mdev)) {
+		if (rep->vport == MLX5_VPORT_UPLINK && !mlx5_lag_is_mpesw(mdev))
+			return;
+		for (i = 0; i < dev->num_ports; i++) {
+			if (dev->port[i].rep == rep)
+				break;
+		}
+		if (WARN_ON(i == dev->num_ports))
+			return;
+		vport_index = i;
+	}
+
 	port = &dev->port[vport_index];
 	write_lock(&port->roce.netdev_lock);
 	port->roce.netdev = NULL;
@@ -143,16 +167,18 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 	port->rep = NULL;
 
 	if (rep->vport == MLX5_VPORT_UPLINK) {
-		struct mlx5_core_dev *peer_mdev;
-		struct mlx5_eswitch *esw;
 
 		if (mlx5_lag_is_shared_fdb(mdev) && !mlx5_lag_is_master(mdev))
 			return;
 
 		if (mlx5_lag_is_shared_fdb(mdev)) {
-			peer_mdev = mlx5_lag_get_peer_mdev(mdev);
-			esw = peer_mdev->priv.eswitch;
-			mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
+			struct mlx5_core_dev *peer_mdev;
+			struct mlx5_eswitch *esw;
+
+			mlx5_lag_for_each_peer_mdev(mdev, peer_mdev, i) {
+				esw = peer_mdev->priv.eswitch;
+				mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
+			}
 		}
 		__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
 	}
@@ -166,14 +192,14 @@ static const struct mlx5_eswitch_rep_ops rep_ops = {
 
 static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev)
 {
-	struct mlx5_core_dev *peer_mdev = mlx5_lag_get_peer_mdev(mdev);
+	struct mlx5_core_dev *peer_mdev;
 	struct mlx5_eswitch *esw;
+	int i;
 
-	if (!peer_mdev)
-		return;
-
-	esw = peer_mdev->priv.eswitch;
-	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
+	mlx5_lag_for_each_peer_mdev(mdev, peer_mdev, i) {
+		esw = peer_mdev->priv.eswitch;
+		mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
+	}
 }
 
 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
@@ -244,16 +244,22 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
 	    ft->type == FS_FT_FDB &&
 	    mlx5_lag_is_shared_fdb(dev) &&
 	    mlx5_lag_is_master(dev)) {
-		err = mlx5_cmd_set_slave_root_fdb(dev,
-						  mlx5_lag_get_peer_mdev(dev),
-						  !disconnect, (!disconnect) ?
-						  ft->id : 0);
-		if (err && !disconnect) {
-			MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
-			MLX5_SET(set_flow_table_root_in, in, table_id,
-				 ns->root_ft->id);
-			mlx5_cmd_exec_in(dev, set_flow_table_root, in);
+		struct mlx5_core_dev *peer_dev;
+		int i;
+
+		mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
+			err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect,
+							  (!disconnect) ? ft->id : 0);
+			if (err && !disconnect) {
+				MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
+				MLX5_SET(set_flow_table_root_in, in, table_id,
+					 ns->root_ft->id);
+				mlx5_cmd_exec_in(dev, set_flow_table_root, in);
+			}
+			if (err)
+				break;
 		}
 	}
 
 	return err;
@@ -1519,26 +1519,37 @@ u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev)
 }
 EXPORT_SYMBOL(mlx5_lag_get_num_ports);
 
-struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
+struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i)
 {
 	struct mlx5_core_dev *peer_dev = NULL;
 	struct mlx5_lag *ldev;
 	unsigned long flags;
+	int idx;
 
 	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	if (!ldev)
 		goto unlock;
 
-	peer_dev = ldev->pf[MLX5_LAG_P1].dev == dev ?
-			   ldev->pf[MLX5_LAG_P2].dev :
-			   ldev->pf[MLX5_LAG_P1].dev;
+	if (*i == ldev->ports)
+		goto unlock;
+	for (idx = *i; idx < ldev->ports; idx++)
+		if (ldev->pf[idx].dev != dev)
+			break;
+
+	if (idx == ldev->ports) {
+		*i = idx;
+		goto unlock;
+	}
+	*i = idx + 1;
+
+	peer_dev = ldev->pf[idx].dev;
 
 unlock:
 	spin_unlock_irqrestore(&lag_lock, flags);
 	return peer_dev;
 }
-EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
+EXPORT_SYMBOL(mlx5_lag_get_next_peer_mdev);
 
 int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 				 u64 *values,
@@ -1174,7 +1174,13 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 				 u64 *values,
 				 int num_counters,
 				 size_t *offsets);
-struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev);
+struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i);
+
+#define mlx5_lag_for_each_peer_mdev(dev, peer, i)			\
+	for (i = 0, peer = mlx5_lag_get_next_peer_mdev(dev, &i);	\
+	     peer;							\
+	     peer = mlx5_lag_get_next_peer_mdev(dev, &i))
+
 u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev);
 struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
 void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
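For clarity, the helper macro above expands to an open-coded loop roughly like the following sketch (illustrative only; the loop body is a placeholder):

/* Approximate expansion of mlx5_lag_for_each_peer_mdev(dev, peer, i):
 * the cursor starts at 0, mlx5_lag_get_next_peer_mdev() skips @dev itself
 * and returns NULL once every LAG member has been visited.
 */
int i = 0;
struct mlx5_core_dev *peer;

for (peer = mlx5_lag_get_next_peer_mdev(dev, &i);
     peer;
     peer = mlx5_lag_get_next_peer_mdev(dev, &i)) {
	/* use peer */
}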