Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
Tariq Toukan says:
====================
mlx5-next updates 2025-11-13
The following pull request contains common mlx5 updates
* 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
net/mlx5: Expose definition for 1600Gbps link mode
net/mlx5: fs, set non default device per namespace
net/mlx5: fs, Add other_eswitch support for steering tables
net/mlx5: Add OTHER_ESWITCH HW capabilities
net/mlx5: Add direct ST mode support for RDMA
PCI/TPH: Expose pcie_tph_get_st_table_loc()
{rdma,net}/mlx5: Query vports mac address from device
====================
Link: https://patch.msgid.link/1763027252-1168760-1-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -842,7 +842,7 @@ static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
 		break;
 
 	case MLX5_VPORT_ACCESS_METHOD_NIC:
-		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
+		err = mlx5_query_nic_vport_node_guid(dev->mdev, 0, false, &tmp);
 		break;
 
 	default:
@@ -875,13 +875,10 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 					      vport_num, 1,
 					      vport->info.link_state);
 
-	/* Host PF has its own mac/guid. */
-	if (vport_num) {
-		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
-						  vport->info.mac);
-		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
-						vport->info.node_guid);
-	}
+	mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true,
+					 vport->info.mac);
+	mlx5_query_nic_vport_node_guid(esw->dev, vport_num, true,
+				       &vport->info.node_guid);
 
 	flags = (vport->info.vlan || vport->info.qos) ?
 		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
@@ -947,12 +944,6 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
 		goto err_vhca_mapping;
 	}
 
-	/* External controller host PF has factory programmed MAC.
-	 * Read it from the device.
-	 */
-	if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
-		mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
-
 	esw_vport_change_handle_locked(vport);
 
 	esw->enabled_vports++;
@@ -2235,6 +2226,9 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 	ivi->vf = vport - 1;
 
 	mutex_lock(&esw->state_lock);
+
+	mlx5_query_nic_vport_mac_address(esw->dev, vport, true,
+					 evport->info.mac);
 	ether_addr_copy(ivi->mac, evport->info.mac);
 	ivi->linkstate = evport->info.link_state;
 	ivi->vlan = evport->info.vlan;
@@ -4491,6 +4491,9 @@ int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
 	struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
 
 	mutex_lock(&esw->state_lock);
+
+	mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, true,
+					 vport->info.mac);
 	ether_addr_copy(hw_addr, vport->info.mac);
 	*hw_addr_len = ETH_ALEN;
 	mutex_unlock(&esw->state_lock);
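The eswitch hunks above all follow one pattern: instead of trusting cached administrative state, the vport MAC address and node GUID are read back from the device with the other_vport variant of the NIC vport queries. A minimal sketch of that call pattern, using only the exported helpers shown in this series; the wrapper function itself is hypothetical and return values are ignored exactly as in esw_vport_setup() above:

	#include <linux/mlx5/driver.h>
	#include <linux/mlx5/vport.h>

	/* Illustrative sketch (not from the patch): refresh a vport's MAC and
	 * node GUID from the device. other_vport=true selects 'vport_num'
	 * instead of the caller's own vport.
	 */
	static void example_read_vport_ids(struct mlx5_core_dev *mdev, u16 vport_num,
					   u8 *mac, u64 *node_guid)
	{
		mlx5_query_nic_vport_mac_address(mdev, vport_num, true, mac);
		mlx5_query_nic_vport_node_guid(mdev, vport_num, true, node_guid);
	}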
@@ -239,6 +239,10 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
 	MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
 	MLX5_SET(set_flow_table_root_in, in, other_vport,
 		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+	MLX5_SET(set_flow_table_root_in, in, eswitch_owner_vhca_id,
+		 ft->esw_owner_vhca_id);
+	MLX5_SET(set_flow_table_root_in, in, other_eswitch,
+		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
 
 	err = mlx5_cmd_exec_in(dev, set_flow_table_root, in);
 	if (!err &&
@@ -302,6 +306,10 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
 	MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
 	MLX5_SET(create_flow_table_in, in, other_vport,
 		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+	MLX5_SET(create_flow_table_in, in, eswitch_owner_vhca_id,
+		 ft->esw_owner_vhca_id);
+	MLX5_SET(create_flow_table_in, in, other_eswitch,
+		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
 
 	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
 		 en_decap);
@@ -360,6 +368,10 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
 	MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
 	MLX5_SET(destroy_flow_table_in, in, other_vport,
 		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+	MLX5_SET(destroy_flow_table_in, in, eswitch_owner_vhca_id,
+		 ft->esw_owner_vhca_id);
+	MLX5_SET(destroy_flow_table_in, in, other_eswitch,
+		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
 
 	err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
 	if (!err)
@@ -394,6 +406,10 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
 	MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
 	MLX5_SET(modify_flow_table_in, in, other_vport,
 		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+	MLX5_SET(modify_flow_table_in, in, eswitch_owner_vhca_id,
+		 ft->esw_owner_vhca_id);
+	MLX5_SET(modify_flow_table_in, in, other_eswitch,
+		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
 	MLX5_SET(modify_flow_table_in, in, modify_field_select,
 		 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
 	if (next_ft) {
@@ -429,6 +445,10 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
 	MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
 	MLX5_SET(create_flow_group_in, in, other_vport,
 		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+	MLX5_SET(create_flow_group_in, in, eswitch_owner_vhca_id,
+		 ft->esw_owner_vhca_id);
+	MLX5_SET(create_flow_group_in, in, other_eswitch,
+		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
 	err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
 	if (!err)
 		fg->id = MLX5_GET(create_flow_group_out, out,
@@ -451,6 +471,10 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
 	MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
 	MLX5_SET(destroy_flow_group_in, in, other_vport,
 		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+	MLX5_SET(destroy_flow_group_in, in, eswitch_owner_vhca_id,
+		 ft->esw_owner_vhca_id);
+	MLX5_SET(destroy_flow_group_in, in, other_eswitch,
+		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
 	return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
 }
 
@@ -559,6 +583,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 	MLX5_SET(set_fte_in, in, vport_number, ft->vport);
 	MLX5_SET(set_fte_in, in, other_vport,
 		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+	MLX5_SET(set_fte_in, in, eswitch_owner_vhca_id, ft->esw_owner_vhca_id);
+	MLX5_SET(set_fte_in, in, other_eswitch,
+		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
 
 	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
 	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
@@ -788,6 +815,10 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
 	MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
 	MLX5_SET(delete_fte_in, in, other_vport,
 		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+	MLX5_SET(delete_fte_in, in, eswitch_owner_vhca_id,
+		 ft->esw_owner_vhca_id);
+	MLX5_SET(delete_fte_in, in, other_eswitch,
+		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
 
 	return mlx5_cmd_exec_in(dev, delete_fte, in);
 }
@@ -939,10 +939,10 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f
 	return fg;
 }
 
-static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
-						enum fs_flow_table_type table_type,
-						enum fs_flow_table_op_mod op_mod,
-						u32 flags)
+static struct mlx5_flow_table *
+alloc_flow_table(struct mlx5_flow_table_attr *ft_attr, u16 vport,
+		 enum fs_flow_table_type table_type,
+		 enum fs_flow_table_op_mod op_mod)
 {
 	struct mlx5_flow_table *ft;
 	int ret;
@@ -957,12 +957,13 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
 		return ERR_PTR(ret);
 	}
 
-	ft->level = level;
+	ft->level = ft_attr->level;
 	ft->node.type = FS_TYPE_FLOW_TABLE;
 	ft->op_mod = op_mod;
 	ft->type = table_type;
 	ft->vport = vport;
-	ft->flags = flags;
+	ft->esw_owner_vhca_id = ft_attr->esw_owner_vhca_id;
+	ft->flags = ft_attr->flags;
 	INIT_LIST_HEAD(&ft->fwd_rules);
 	mutex_init(&ft->lock);
 
@@ -1370,10 +1371,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 	/* The level is related to the
 	 * priority level range.
 	 */
-	ft = alloc_flow_table(ft_attr->level,
-			      vport,
-			      root->table_type,
-			      op_mod, ft_attr->flags);
+	ft = alloc_flow_table(ft_attr, vport, root->table_type, op_mod);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto unlock_root;
@@ -3310,6 +3308,62 @@ err:
 	return ret;
 }
 
+static bool mlx5_fs_ns_is_empty(struct mlx5_flow_namespace *ns)
+{
+	struct fs_prio *iter_prio;
+
+	fs_for_each_prio(iter_prio, ns) {
+		if (iter_prio->num_ft)
+			return false;
+	}
+
+	return true;
+}
+
+int mlx5_fs_set_root_dev(struct mlx5_core_dev *dev,
+			 struct mlx5_core_dev *new_dev,
+			 enum fs_flow_table_type table_type)
+{
+	struct mlx5_flow_root_namespace **root;
+	int total_vports;
+	int i;
+
+	switch (table_type) {
+	case FS_FT_RDMA_TRANSPORT_TX:
+		root = dev->priv.steering->rdma_transport_tx_root_ns;
+		total_vports = dev->priv.steering->rdma_transport_tx_vports;
+		break;
+	case FS_FT_RDMA_TRANSPORT_RX:
+		root = dev->priv.steering->rdma_transport_rx_root_ns;
+		total_vports = dev->priv.steering->rdma_transport_rx_vports;
+		break;
+	default:
+		WARN_ON_ONCE(true);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < total_vports; i++) {
+		mutex_lock(&root[i]->chain_lock);
+		if (!mlx5_fs_ns_is_empty(&root[i]->ns)) {
+			mutex_unlock(&root[i]->chain_lock);
+			goto err;
+		}
+		root[i]->dev = new_dev;
+		mutex_unlock(&root[i]->chain_lock);
+	}
+	return 0;
+err:
+	while (i--) {
+		mutex_lock(&root[i]->chain_lock);
+		root[i]->dev = dev;
+		mutex_unlock(&root[i]->chain_lock);
+	}
+	/* If you hit this error try destroying all flow tables and try again */
+	mlx5_core_err(dev, "Failed to set root device for RDMA TRANSPORT\n");
+	return -EINVAL;
+}
+EXPORT_SYMBOL(mlx5_fs_set_root_dev);
+
 static int init_rdma_transport_rx_root_ns(struct mlx5_flow_steering *steering)
 {
 	struct mlx5_core_dev *dev = steering->dev;
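The new mlx5_fs_set_root_dev() lets a consumer (the RDMA driver is the one named in this series) re-parent the RDMA_TRANSPORT steering root namespaces to a peer core device, and it only succeeds while those namespaces hold no flow tables. A hypothetical usage sketch; the caller, the peer-device pairing, and the rollback on partial failure are illustrative assumptions, only mlx5_fs_set_root_dev() and the FS_FT_RDMA_TRANSPORT_* types come from the patch:

	#include <linux/mlx5/driver.h>
	#include <linux/mlx5/fs.h>

	/* Hypothetical caller: steer both RDMA transport directions of 'dev'
	 * through 'peer_dev'; if the TX side fails, restore the RX side.
	 */
	static int example_move_rdma_transport_root(struct mlx5_core_dev *dev,
						    struct mlx5_core_dev *peer_dev)
	{
		int err;

		err = mlx5_fs_set_root_dev(dev, peer_dev, FS_FT_RDMA_TRANSPORT_RX);
		if (err)
			return err;	/* fails if any RX transport table exists */

		err = mlx5_fs_set_root_dev(dev, peer_dev, FS_FT_RDMA_TRANSPORT_TX);
		if (err)
			/* roll the RX side back to the original device */
			mlx5_fs_set_root_dev(dev, dev, FS_FT_RDMA_TRANSPORT_RX);

		return err;
	}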
@@ -103,24 +103,6 @@ enum fs_node_type {
 	FS_TYPE_FLOW_DEST
 };
 
-enum fs_flow_table_type {
-	FS_FT_NIC_RX = 0x0,
-	FS_FT_NIC_TX = 0x1,
-	FS_FT_ESW_EGRESS_ACL = 0x2,
-	FS_FT_ESW_INGRESS_ACL = 0x3,
-	FS_FT_FDB = 0X4,
-	FS_FT_SNIFFER_RX = 0X5,
-	FS_FT_SNIFFER_TX = 0X6,
-	FS_FT_RDMA_RX = 0X7,
-	FS_FT_RDMA_TX = 0X8,
-	FS_FT_PORT_SEL = 0X9,
-	FS_FT_FDB_RX = 0xa,
-	FS_FT_FDB_TX = 0xb,
-	FS_FT_RDMA_TRANSPORT_RX = 0xd,
-	FS_FT_RDMA_TRANSPORT_TX = 0xe,
-	FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX,
-};
-
 enum fs_flow_table_op_mod {
 	FS_FT_OP_MOD_NORMAL,
 	FS_FT_OP_MOD_LAG_DEMUX,
@@ -205,6 +187,7 @@ struct mlx5_flow_table {
 	};
 	u32 id;
 	u16 vport;
+	u16 esw_owner_vhca_id;
 	unsigned int max_fte;
 	unsigned int level;
 	enum fs_flow_table_type type;
@@ -19,13 +19,16 @@ struct mlx5_st {
 	struct mutex lock;
 	struct xa_limit index_limit;
 	struct xarray idx_xa; /* key == index, value == struct mlx5_st_idx_data */
+	u8 direct_mode : 1;
 };
 
 struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
 {
 	struct pci_dev *pdev = dev->pdev;
 	struct mlx5_st *st;
+	u8 direct_mode = 0;
 	u16 num_entries;
+	u32 tbl_loc;
 	int ret;
 
 	if (!MLX5_CAP_GEN(dev, mkey_pcie_tph))
@@ -40,10 +43,16 @@ struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
 	if (!pdev->tph_cap)
 		return NULL;
 
-	num_entries = pcie_tph_get_st_table_size(pdev);
-	/* We need a reserved entry for non TPH cases */
-	if (num_entries < 2)
-		return NULL;
+	tbl_loc = pcie_tph_get_st_table_loc(pdev);
+	if (tbl_loc == PCI_TPH_LOC_NONE)
+		direct_mode = 1;
+
+	if (!direct_mode) {
+		num_entries = pcie_tph_get_st_table_size(pdev);
+		/* We need a reserved entry for non TPH cases */
+		if (num_entries < 2)
+			return NULL;
+	}
 
 	/* The OS doesn't support ST */
 	ret = pcie_enable_tph(pdev, PCI_TPH_ST_DS_MODE);
@@ -56,6 +65,10 @@ struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
 
 	mutex_init(&st->lock);
 	xa_init_flags(&st->idx_xa, XA_FLAGS_ALLOC);
+	st->direct_mode = direct_mode;
+	if (st->direct_mode)
+		return st;
+
 	/* entry 0 is reserved for non TPH cases */
 	st->index_limit.min = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX + 1;
 	st->index_limit.max = num_entries - 1;
@@ -96,6 +109,11 @@ int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
 	if (ret)
 		return ret;
 
+	if (st->direct_mode) {
+		*st_index = tag;
+		return 0;
+	}
+
 	mutex_lock(&st->lock);
 
 	xa_for_each(&st->idx_xa, index, idx_data) {
@@ -145,6 +163,9 @@ int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
 	if (!st)
 		return -EOPNOTSUPP;
 
+	if (st->direct_mode)
+		return 0;
+
 	mutex_lock(&st->lock);
 	idx_data = xa_load(&st->idx_xa, st_index);
 	if (WARN_ON_ONCE(!idx_data)) {
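Direct ST mode means a device whose ST table location is PCI_TPH_LOC_NONE skips the on-device steering-tag table entirely: the tag itself becomes the steering index, with no xarray bookkeeping and no pcie_tph_set_st_entry() programming, and deallocation becomes a no-op. A condensed, hypothetical sketch of that allocation path, assuming (as in the existing mlx5 path) that the tag comes from pcie_tph_get_cpu_st(); the table-based branch is omitted and stands in as a placeholder return:

	#include <linux/errno.h>
	#include <linux/pci.h>
	#include <linux/pci-tph.h>

	/* Illustrative sketch of steering-tag index allocation with direct mode;
	 * 'direct_mode' mirrors the new mlx5_st flag above.
	 */
	static int example_st_alloc_index(struct pci_dev *pdev, bool direct_mode,
					  enum tph_mem_type mem_type,
					  unsigned int cpu_uid, u16 *st_index)
	{
		u16 tag;
		int ret;

		ret = pcie_tph_get_cpu_st(pdev, mem_type, cpu_uid, &tag);
		if (ret)
			return ret;

		if (direct_mode) {
			*st_index = tag;	/* the steering tag is the index */
			return 0;
		}

		return -EOPNOTSUPP;	/* placeholder for the omitted table-based path */
	}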
@@ -78,15 +78,14 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
 }
 
 static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
-					u32 *out)
+					bool other_vport, u32 *out)
 {
 	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
 
 	MLX5_SET(query_nic_vport_context_in, in, opcode,
 		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
 	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+	MLX5_SET(query_nic_vport_context_in, in, other_vport, other_vport);
 
 	return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
 }
@@ -97,7 +96,7 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
 	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
 	int err;
 
-	err = mlx5_query_nic_vport_context(mdev, vport, out);
+	err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out);
 	if (!err)
 		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
 				       nic_vport_context.min_wqe_inline_mode);
@@ -219,7 +218,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
 	if (!out)
 		return -ENOMEM;
 
-	err = mlx5_query_nic_vport_context(mdev, 0, out);
+	err = mlx5_query_nic_vport_context(mdev, 0, false, out);
 	if (!err)
 		*mtu = MLX5_GET(query_nic_vport_context_out, out,
 				nic_vport_context.mtu);
@@ -429,7 +428,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
 	if (!out)
 		return -ENOMEM;
 
-	err = mlx5_query_nic_vport_context(mdev, 0, out);
+	err = mlx5_query_nic_vport_context(mdev, 0, false, out);
 	if (err)
 		goto out;
 
@@ -451,7 +450,7 @@ int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
 	if (!out)
 		return -ENOMEM;
 
-	err = mlx5_query_nic_vport_context(mdev, 0, out);
+	err = mlx5_query_nic_vport_context(mdev, 0, false, out);
 	if (err)
 		goto out;
 
@@ -462,7 +461,8 @@ out:
 	return err;
 }
 
-int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+				   u16 vport, bool other_vport, u64 *node_guid)
 {
 	u32 *out;
 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
@@ -472,7 +472,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
 	if (!out)
 		return -ENOMEM;
 
-	err = mlx5_query_nic_vport_context(mdev, 0, out);
+	err = mlx5_query_nic_vport_context(mdev, vport, other_vport, out);
 	if (err)
 		goto out;
 
@@ -529,7 +529,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
 	if (!out)
 		return -ENOMEM;
 
-	err = mlx5_query_nic_vport_context(mdev, 0, out);
+	err = mlx5_query_nic_vport_context(mdev, 0, false, out);
 	if (err)
 		goto out;
 
@@ -804,7 +804,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
 	if (!out)
 		return -ENOMEM;
 
-	err = mlx5_query_nic_vport_context(mdev, vport, out);
+	err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out);
 	if (err)
 		goto out;
 
@@ -908,7 +908,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
 	if (!out)
 		return -ENOMEM;
 
-	err = mlx5_query_nic_vport_context(mdev, 0, out);
+	err = mlx5_query_nic_vport_context(mdev, 0, false, out);
 	if (err)
 		goto out;
 
@@ -155,7 +155,16 @@ static u8 get_st_modes(struct pci_dev *pdev)
 	return reg;
 }
 
-static u32 get_st_table_loc(struct pci_dev *pdev)
+/**
+ * pcie_tph_get_st_table_loc - Return the device's ST table location
+ * @pdev: PCI device to query
+ *
+ * Return:
+ * PCI_TPH_LOC_NONE - Not present
+ * PCI_TPH_LOC_CAP - Located in the TPH Requester Extended Capability
+ * PCI_TPH_LOC_MSIX - Located in the MSI-X Table
+ */
+u32 pcie_tph_get_st_table_loc(struct pci_dev *pdev)
 {
 	u32 reg;
 
@@ -163,6 +172,7 @@ static u32 get_st_table_loc(struct pci_dev *pdev)
 
 	return FIELD_GET(PCI_TPH_CAP_LOC_MASK, reg);
 }
+EXPORT_SYMBOL(pcie_tph_get_st_table_loc);
 
 /*
  * Return the size of ST table. If ST table is not in TPH Requester Extended
@@ -174,7 +184,7 @@ u16 pcie_tph_get_st_table_size(struct pci_dev *pdev)
 	u32 loc;
 
 	/* Check ST table location first */
-	loc = get_st_table_loc(pdev);
+	loc = pcie_tph_get_st_table_loc(pdev);
 
 	/* Convert loc to match with PCI_TPH_LOC_* defined in pci_regs.h */
 	loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc);
@@ -299,7 +309,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
 	 */
 	set_ctrl_reg_req_en(pdev, PCI_TPH_REQ_DISABLE);
 
-	loc = get_st_table_loc(pdev);
+	loc = pcie_tph_get_st_table_loc(pdev);
 	/* Convert loc to match with PCI_TPH_LOC_* */
 	loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc);
 
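pcie_tph_get_st_table_loc() is now exported so a driver can learn whether the device implements an ST table at all before deciding how to program steering tags; mlx5 only compares the result against PCI_TPH_LOC_NONE, as shown above. A small hypothetical usage sketch mirroring that probe logic (the helper name is illustrative, the APIs are the ones exported here):

	#include <linux/pci.h>
	#include <linux/pci-tph.h>

	/* Illustrative: decide whether table-based TPH is usable, keeping one
	 * entry reserved for the non-TPH case as mlx5 does above.
	 */
	static bool example_tph_table_usable(struct pci_dev *pdev)
	{
		if (pcie_tph_get_st_table_loc(pdev) == PCI_TPH_LOC_NONE)
			return false;		/* no ST table: consider direct tagging */

		return pcie_tph_get_st_table_size(pdev) >= 2;
	}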
@@ -71,6 +71,7 @@ enum {
 	MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
 	MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
 	MLX5_FLOW_TABLE_UPLINK_VPORT = BIT(5),
+	MLX5_FLOW_TABLE_OTHER_ESWITCH = BIT(6),
 };
 
 #define LEFTOVERS_RULE_NUM 2
@@ -128,6 +129,24 @@ enum {
 	FDB_PER_VPORT,
 };
 
+enum fs_flow_table_type {
+	FS_FT_NIC_RX = 0x0,
+	FS_FT_NIC_TX = 0x1,
+	FS_FT_ESW_EGRESS_ACL = 0x2,
+	FS_FT_ESW_INGRESS_ACL = 0x3,
+	FS_FT_FDB = 0X4,
+	FS_FT_SNIFFER_RX = 0X5,
+	FS_FT_SNIFFER_TX = 0X6,
+	FS_FT_RDMA_RX = 0X7,
+	FS_FT_RDMA_TX = 0X8,
+	FS_FT_PORT_SEL = 0X9,
+	FS_FT_FDB_RX = 0xa,
+	FS_FT_FDB_TX = 0xb,
+	FS_FT_RDMA_TRANSPORT_RX = 0xd,
+	FS_FT_RDMA_TRANSPORT_TX = 0xe,
+	FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX,
+};
+
 struct mlx5_pkt_reformat;
 struct mlx5_modify_hdr;
 struct mlx5_flow_definer;
@@ -209,6 +228,7 @@ struct mlx5_flow_table_attr {
 	u32 flags;
 	u16 uid;
 	u16 vport;
+	u16 esw_owner_vhca_id;
 	struct mlx5_flow_table *next_ft;
 
 	struct {
@@ -354,4 +374,8 @@ u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
 
 struct mlx5_flow_root_namespace *
 mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type);
+
+int mlx5_fs_set_root_dev(struct mlx5_core_dev *dev,
+			 struct mlx5_core_dev *new_dev,
+			 enum fs_flow_table_type table_type);
 #endif
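Putting the API pieces together: a table that lives in another eswitch is described entirely through mlx5_flow_table_attr, with MLX5_FLOW_TABLE_OTHER_ESWITCH in flags and the owner's VHCA id in esw_owner_vhca_id, and fs_cmd.c then carries both into every firmware command as shown earlier. A hypothetical creation sketch; the namespace, the attribute values, and the pairing with MLX5_FLOW_TABLE_OTHER_VPORT/vport are illustrative assumptions, not prescribed by the series:

	#include <linux/mlx5/fs.h>

	/* Illustrative: create a small table owned by another eswitch, targeting
	 * one of its vports. Sizes and level are placeholder values.
	 */
	static struct mlx5_flow_table *
	example_create_other_esw_table(struct mlx5_flow_namespace *ns,
				       u16 vport, u16 owner_vhca_id)
	{
		struct mlx5_flow_table_attr ft_attr = {};

		ft_attr.max_fte = 16;
		ft_attr.level = 0;
		ft_attr.vport = vport;
		ft_attr.esw_owner_vhca_id = owner_vhca_id;
		ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT |
				MLX5_FLOW_TABLE_OTHER_ESWITCH;

		return mlx5_create_flow_table(ns, &ft_attr);
	}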
@@ -5251,13 +5251,15 @@ struct mlx5_ifc_set_fte_in_bits {
 	u8 op_mod[0x10];
 
 	u8 other_vport[0x1];
-	u8 reserved_at_41[0xf];
+	u8 other_eswitch[0x1];
+	u8 reserved_at_42[0xe];
 	u8 vport_number[0x10];
 
 	u8 reserved_at_60[0x20];
 
 	u8 table_type[0x8];
-	u8 reserved_at_88[0x18];
+	u8 reserved_at_88[0x8];
+	u8 eswitch_owner_vhca_id[0x10];
 
 	u8 reserved_at_a0[0x8];
 	u8 table_id[0x18];
@@ -8809,13 +8811,15 @@ struct mlx5_ifc_destroy_flow_table_in_bits {
 	u8 op_mod[0x10];
 
 	u8 other_vport[0x1];
-	u8 reserved_at_41[0xf];
+	u8 other_eswitch[0x1];
+	u8 reserved_at_42[0xe];
 	u8 vport_number[0x10];
 
 	u8 reserved_at_60[0x20];
 
 	u8 table_type[0x8];
-	u8 reserved_at_88[0x18];
+	u8 reserved_at_88[0x8];
+	u8 eswitch_owner_vhca_id[0x10];
 
 	u8 reserved_at_a0[0x8];
 	u8 table_id[0x18];
@@ -8840,13 +8844,15 @@ struct mlx5_ifc_destroy_flow_group_in_bits {
 	u8 op_mod[0x10];
 
 	u8 other_vport[0x1];
-	u8 reserved_at_41[0xf];
+	u8 other_eswitch[0x1];
+	u8 reserved_at_42[0xe];
 	u8 vport_number[0x10];
 
 	u8 reserved_at_60[0x20];
 
 	u8 table_type[0x8];
-	u8 reserved_at_88[0x18];
+	u8 reserved_at_88[0x8];
+	u8 eswitch_owner_vhca_id[0x10];
 
 	u8 reserved_at_a0[0x8];
 	u8 table_id[0x18];
@@ -8985,13 +8991,15 @@ struct mlx5_ifc_delete_fte_in_bits {
 	u8 op_mod[0x10];
 
 	u8 other_vport[0x1];
-	u8 reserved_at_41[0xf];
+	u8 other_eswitch[0x1];
+	u8 reserved_at_42[0xe];
 	u8 vport_number[0x10];
 
 	u8 reserved_at_60[0x20];
 
 	u8 table_type[0x8];
-	u8 reserved_at_88[0x18];
+	u8 reserved_at_88[0x8];
+	u8 eswitch_owner_vhca_id[0x10];
 
 	u8 reserved_at_a0[0x8];
 	u8 table_id[0x18];
@@ -9535,13 +9543,15 @@ struct mlx5_ifc_create_flow_table_in_bits {
 	u8 op_mod[0x10];
 
 	u8 other_vport[0x1];
-	u8 reserved_at_41[0xf];
+	u8 other_eswitch[0x1];
+	u8 reserved_at_42[0xe];
 	u8 vport_number[0x10];
 
 	u8 reserved_at_60[0x20];
 
 	u8 table_type[0x8];
-	u8 reserved_at_88[0x18];
+	u8 reserved_at_88[0x8];
+	u8 eswitch_owner_vhca_id[0x10];
 
 	u8 reserved_at_a0[0x20];
 
@@ -9580,7 +9590,8 @@ struct mlx5_ifc_create_flow_group_in_bits {
 	u8 op_mod[0x10];
 
 	u8 other_vport[0x1];
-	u8 reserved_at_41[0xf];
+	u8 other_eswitch[0x1];
+	u8 reserved_at_42[0xe];
 	u8 vport_number[0x10];
 
 	u8 reserved_at_60[0x20];
@@ -9588,7 +9599,7 @@ struct mlx5_ifc_create_flow_group_in_bits {
 	u8 table_type[0x8];
 	u8 reserved_at_88[0x4];
 	u8 group_type[0x4];
-	u8 reserved_at_90[0x10];
+	u8 eswitch_owner_vhca_id[0x10];
 
 	u8 reserved_at_a0[0x8];
 	u8 table_id[0x18];
@@ -11878,10 +11889,12 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
 	u8 op_mod[0x10];
 
 	u8 other_vport[0x1];
-	u8 reserved_at_41[0xf];
+	u8 other_eswitch[0x1];
+	u8 reserved_at_42[0xe];
 	u8 vport_number[0x10];
 
-	u8 reserved_at_60[0x20];
+	u8 reserved_at_60[0x10];
+	u8 eswitch_owner_vhca_id[0x10];
 
 	u8 table_type[0x8];
 	u8 reserved_at_88[0x7];
@@ -11921,14 +11934,16 @@ struct mlx5_ifc_modify_flow_table_in_bits {
 	u8 op_mod[0x10];
 
 	u8 other_vport[0x1];
-	u8 reserved_at_41[0xf];
+	u8 other_eswitch[0x1];
+	u8 reserved_at_42[0xe];
 	u8 vport_number[0x10];
 
 	u8 reserved_at_60[0x10];
 	u8 modify_field_select[0x10];
 
 	u8 table_type[0x8];
-	u8 reserved_at_88[0x18];
+	u8 reserved_at_88[0x8];
+	u8 eswitch_owner_vhca_id[0x10];
 
 	u8 reserved_at_a0[0x8];
 	u8 table_id[0x18];
@@ -112,6 +112,7 @@ enum mlx5e_ext_link_mode {
 	MLX5E_400GAUI_2_400GBASE_CR2_KR2 = 17,
 	MLX5E_800GAUI_8_800GBASE_CR8_KR8 = 19,
 	MLX5E_800GAUI_4_800GBASE_CR4_KR4 = 20,
+	MLX5E_1600TAUI_8_1600TBASE_CR8_KR8 = 23,
 	MLX5E_EXT_LINK_MODES_NUMBER,
 };
 
@@ -73,7 +73,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
 					   u64 *system_image_guid);
 int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
-int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+				   u16 vport, bool other_vport, u64 *node_guid);
 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
 				    u16 vport, u64 node_guid);
 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
@@ -29,6 +29,7 @@ int pcie_tph_get_cpu_st(struct pci_dev *dev,
 void pcie_disable_tph(struct pci_dev *pdev);
 int pcie_enable_tph(struct pci_dev *pdev, int mode);
 u16 pcie_tph_get_st_table_size(struct pci_dev *pdev);
+u32 pcie_tph_get_st_table_loc(struct pci_dev *pdev);
 #else
 static inline int pcie_tph_set_st_entry(struct pci_dev *pdev,
 					unsigned int index, u16 tag)