Merge branch 'net-mlx5-move-notifiers-outside-the-devlink-lock'

Tariq Toukan says:

====================
net/mlx5: Move notifiers outside the devlink lock

This series by Cosmin moves blocking notifier registration in the mlx5
driver outside the devlink lock during probe.

This is mostly a no-op refactoring, split into multiple pieces.
It is needed because upcoming code would introduce a potential locking
cycle between the devlink lock and the rwsems inside the blocking
notifier heads, so notifier (un)registration must move out of the
devlink-locked critical section (see the sketch after this cover
letter).
====================
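
For intuition, here is a minimal sketch of the AB-BA cycle being
avoided. It is illustrative only: the names (example_chain, example_cb,
example_nb) and the callback body are assumptions, not code from this
series. blocking_notifier_call_chain() holds the chain's rwsem while
running callbacks, so the two paths below acquire the same two locks in
opposite order:

static BLOCKING_NOTIFIER_HEAD(example_chain);
static struct devlink *dl;

static int example_cb(struct notifier_block *nb, unsigned long ev,
		      void *data)
{
	devl_lock(dl);		/* path B: chain rwsem -> devlink lock */
	/* ... */
	devl_unlock(dl);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = { .notifier_call = example_cb };

static void example_probe_step(void)
{
	devl_lock(dl);
	/* path A: devlink lock -> chain rwsem (down_write) */
	blocking_notifier_chain_register(&example_chain, &example_nb);
	devl_unlock(dl);
}

Moving registration out of the devlink-locked section removes path A,
which is what the patches below do.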

Link: https://patch.msgid.link/1763325940-1231508-1-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
 11 files changed, 253 insertions(+), 151 deletions(-)


@@ -1474,7 +1474,7 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
info.new_mode = mode;
blocking_notifier_call_chain(&esw->n_head, 0, &info);
blocking_notifier_call_chain(&esw->dev->priv.esw_n_head, 0, &info);
}
static int mlx5_esw_egress_acls_init(struct mlx5_core_dev *dev)
@@ -2050,7 +2050,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
@@ -2379,14 +2378,16 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}
int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev,
struct notifier_block *nb)
{
return blocking_notifier_chain_register(&esw->n_head, nb);
return blocking_notifier_chain_register(&dev->priv.esw_n_head, nb);
}
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev,
struct notifier_block *nb)
{
blocking_notifier_chain_unregister(&esw->n_head, nb);
blocking_notifier_chain_unregister(&dev->priv.esw_n_head, nb);
}
/**


@@ -403,7 +403,6 @@ struct mlx5_eswitch {
struct {
u32 large_group_num;
} params;
struct blocking_notifier_head n_head;
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
u16 enabled_ipsec_vf_count;
@@ -864,8 +863,10 @@ struct mlx5_esw_event_info {
u16 new_mode;
};
int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev,
struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev,
struct notifier_block *n);
bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);


@@ -1010,16 +1010,10 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_irq_cleanup;
}
err = mlx5_events_init(dev);
if (err) {
mlx5_core_err(dev, "failed to initialize events\n");
goto err_eq_cleanup;
}
err = mlx5_fw_reset_init(dev);
if (err) {
mlx5_core_err(dev, "failed to initialize fw reset events\n");
goto err_events_cleanup;
goto err_eq_cleanup;
}
mlx5_cq_debugfs_init(dev);
@@ -1121,8 +1115,6 @@ err_tables_cleanup:
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
err_events_cleanup:
mlx5_events_cleanup(dev);
err_eq_cleanup:
mlx5_eq_table_cleanup(dev);
err_irq_cleanup:
@@ -1155,7 +1147,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
@@ -1386,12 +1377,6 @@ static int mlx5_load(struct mlx5_core_dev *dev)
mlx5_vhca_event_start(dev);
err = mlx5_sf_hw_table_create(dev);
if (err) {
mlx5_core_err(dev, "sf table create failed %d\n", err);
goto err_vhca;
}
err = mlx5_ec_init(dev);
if (err) {
mlx5_core_err(dev, "Failed to init embedded CPU\n");
@@ -1420,8 +1405,6 @@ err_sriov:
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
err_ec:
mlx5_sf_hw_table_destroy(dev);
err_vhca:
mlx5_vhca_event_stop(dev);
err_set_hca:
mlx5_fs_core_cleanup(dev);
@@ -1447,12 +1430,12 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
{
mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_devlink_traps_unregister(priv_to_devlink(dev));
mlx5_vhca_event_stop(dev);
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
mlx5_vhca_event_stop(dev);
mlx5_fs_core_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev);
@@ -1833,6 +1816,50 @@ static int vhca_id_show(struct seq_file *file, void *priv)
DEFINE_SHOW_ATTRIBUTE(vhca_id);
static int mlx5_notifiers_init(struct mlx5_core_dev *dev)
{
int err;
err = mlx5_events_init(dev);
if (err) {
mlx5_core_err(dev, "failed to initialize events\n");
return err;
}
BLOCKING_INIT_NOTIFIER_HEAD(&dev->priv.esw_n_head);
mlx5_vhca_state_notifier_init(dev);
err = mlx5_sf_hw_notifier_init(dev);
if (err)
goto err_sf_hw_notifier;
err = mlx5_sf_notifiers_init(dev);
if (err)
goto err_sf_notifiers;
err = mlx5_sf_dev_notifier_init(dev);
if (err)
goto err_sf_dev_notifier;
return 0;
err_sf_dev_notifier:
mlx5_sf_notifiers_cleanup(dev);
err_sf_notifiers:
mlx5_sf_hw_notifier_cleanup(dev);
err_sf_hw_notifier:
mlx5_events_cleanup(dev);
return err;
}
static void mlx5_notifiers_cleanup(struct mlx5_core_dev *dev)
{
mlx5_sf_dev_notifier_cleanup(dev);
mlx5_sf_notifiers_cleanup(dev);
mlx5_sf_hw_notifier_cleanup(dev);
mlx5_events_cleanup(dev);
}
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
struct mlx5_priv *priv = &dev->priv;
@@ -1888,6 +1915,10 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
if (err)
goto err_hca_caps;
err = mlx5_notifiers_init(dev);
if (err)
goto err_hca_caps;
/* The conjunction of sw_vhca_id with sw_owner_id will be a global
* unique id per function which uses mlx5_core.
* Those values are supplied to FW as part of the init HCA command to
@@ -1930,6 +1961,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
if (priv->sw_vhca_id > 0)
ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);
mlx5_notifiers_cleanup(dev);
mlx5_hca_caps_free(dev);
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
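
The net effect in main.c: all blocking-notifier setup is collected into
mlx5_notifiers_init()/mlx5_notifiers_cleanup() and called from
mlx5_mdev_init()/mlx5_mdev_uninit(), i.e. outside the devlink lock,
while mlx5_load()/mlx5_unload() keep only the table create/destroy
work. A simplified ordering sketch (call sites not shown in this diff
follow my reading of the driver and are approximate):

/* probe, simplified:
 *
 *   mlx5_mdev_init()                      devlink lock not held
 *     mlx5_notifiers_init()
 *       mlx5_events_init()
 *       BLOCKING_INIT_NOTIFIER_HEAD(&priv.esw_n_head)
 *       mlx5_vhca_state_notifier_init()
 *       mlx5_sf_hw_notifier_init()
 *       mlx5_sf_notifiers_init()
 *       mlx5_sf_dev_notifier_init()
 *
 *   mlx5_init_one()
 *     devl_lock()
 *       mlx5_load()                       creates the tables; no chain
 *                                         (un)registration under the
 *                                         devlink lock anymore
 */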


@@ -16,7 +16,6 @@ struct mlx5_sf_dev_table {
struct xarray devices;
phys_addr_t base_address;
u64 sf_bar_length;
struct notifier_block nb;
struct workqueue_struct *active_wq;
struct work_struct work;
u8 stop_active_wq:1;
@@ -156,18 +155,23 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de
static int
mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_code, void *data)
{
struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
priv.sf_dev_nb);
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_dev *sf_dev;
u16 max_functions;
u16 sf_index;
u16 base_id;
max_functions = mlx5_sf_max_functions(table->dev);
if (!table)
return 0;
max_functions = mlx5_sf_max_functions(dev);
if (!max_functions)
return 0;
base_id = mlx5_sf_start_function_id(table->dev);
base_id = mlx5_sf_start_function_id(dev);
if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
return 0;
@@ -177,19 +181,19 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
if (sf_dev)
mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
mlx5_sf_dev_del(dev, sf_dev, sf_index);
break;
case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
if (sf_dev)
mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
mlx5_sf_dev_del(dev, sf_dev, sf_index);
else
mlx5_core_err(table->dev,
mlx5_core_err(dev,
"SF DEV: teardown state for invalid dev index=%d sfnum=0x%x\n",
sf_index, event->sw_function_id);
break;
case MLX5_VHCA_STATE_ACTIVE:
if (!sf_dev)
mlx5_sf_dev_add(table->dev, sf_index, event->function_id,
mlx5_sf_dev_add(dev, sf_index, event->function_id,
event->sw_function_id);
break;
default:
@@ -315,6 +319,15 @@ static void mlx5_sf_dev_destroy_active_works(struct mlx5_sf_dev_table *table)
}
}
int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev)
{
if (mlx5_core_is_sf(dev))
return 0;
dev->priv.sf_dev_nb.notifier_call = mlx5_sf_dev_state_change_handler;
return mlx5_vhca_event_notifier_register(dev, &dev->priv.sf_dev_nb);
}
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table;
@@ -329,17 +342,12 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
goto table_err;
}
table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
table->dev = dev;
table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
table->base_address = pci_resource_start(dev->pdev, 2);
xa_init(&table->devices);
dev->priv.sf_dev_table = table;
err = mlx5_vhca_event_notifier_register(dev, &table->nb);
if (err)
goto vhca_err;
err = mlx5_sf_dev_create_active_works(table);
if (err)
goto add_active_err;
@@ -351,10 +359,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
arm_err:
mlx5_sf_dev_destroy_active_works(table);
add_active_err:
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
mlx5_vhca_event_work_queues_flush(dev);
vhca_err:
add_active_err:
kfree(table);
dev->priv.sf_dev_table = NULL;
table_err:
@@ -372,6 +378,14 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
}
}
void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev)
{
if (mlx5_core_is_sf(dev))
return;
mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_dev_nb);
}
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
@@ -380,8 +394,6 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
return;
mlx5_sf_dev_destroy_active_works(table);
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
mlx5_vhca_event_work_queues_flush(dev);
/* Now that event handler is not running, it is safe to destroy
* the sf device without race.
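
The handler rewrite above shows the pattern used throughout the series:
embed the notifier_block in mlx5_core_dev's priv, recover the device
with container_of(), and tolerate the table not existing yet, since
registration now happens before, and independently of, table creation.
A minimal sketch with hypothetical names (my_nb, my_table are not real
mlx5_priv members):

static int my_handler(struct notifier_block *nb, unsigned long ev,
		      void *data)
{
	struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
						 priv.my_nb);
	struct my_table *table = dev->priv.my_table;

	if (!table)	/* notifier registered, table not created yet */
		return 0;
	/* ... handle the event using dev and table ... */
	return 0;
}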


@@ -25,7 +25,9 @@ struct mlx5_sf_peer_devlink_event_ctx {
int err;
};
int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev);
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev);
void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev);
int mlx5_sf_driver_register(void);
@@ -35,10 +37,19 @@ bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev);
#else
static inline int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev)
{
return 0;
}
static inline void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
}
static inline void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev)
{
}
static inline void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
{
}


@@ -31,9 +31,6 @@ struct mlx5_sf_table {
struct mlx5_core_dev *dev; /* To refer from notifier context. */
struct xarray function_ids; /* function id based lookup. */
struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
struct notifier_block esw_nb;
struct notifier_block vhca_nb;
struct notifier_block mdev_nb;
};
static struct mlx5_sf *
@@ -391,11 +388,16 @@ static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
priv.sf_table_vhca_nb);
struct mlx5_sf_table *table = dev->priv.sf_table;
const struct mlx5_vhca_state_event *event = data;
bool update = false;
struct mlx5_sf *sf;
if (!table)
return 0;
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
if (!sf)
@@ -407,7 +409,7 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
if (update)
sf->hw_state = event->new_vhca_state;
trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
trace_mlx5_sf_update_state(dev, sf->port_index, sf->controller,
sf->hw_fn_id, sf->hw_state);
unlock:
mutex_unlock(&table->sf_state_lock);
@@ -425,12 +427,16 @@ static void mlx5_sf_del_all(struct mlx5_sf_table *table)
static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
priv.sf_table_esw_nb);
const struct mlx5_esw_event_info *mode = data;
if (!dev->priv.sf_table)
return 0;
switch (mode->new_mode) {
case MLX5_ESWITCH_LEGACY:
mlx5_sf_del_all(table);
mlx5_sf_del_all(dev->priv.sf_table);
break;
default:
break;
@@ -441,15 +447,16 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, mdev_nb);
struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
priv.sf_table_mdev_nb);
struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data;
struct mlx5_sf_table *table = dev->priv.sf_table;
int ret = NOTIFY_DONE;
struct mlx5_sf *sf;
if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
if (!table || event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
return NOTIFY_DONE;
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id);
if (!sf)
@@ -464,10 +471,40 @@ out:
return ret;
}
int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev)
{
int err;
if (mlx5_core_is_sf(dev))
return 0;
dev->priv.sf_table_esw_nb.notifier_call = mlx5_sf_esw_event;
err = mlx5_esw_event_notifier_register(dev, &dev->priv.sf_table_esw_nb);
if (err)
return err;
dev->priv.sf_table_vhca_nb.notifier_call = mlx5_sf_vhca_event;
err = mlx5_vhca_event_notifier_register(dev,
&dev->priv.sf_table_vhca_nb);
if (err)
goto vhca_err;
dev->priv.sf_table_mdev_nb.notifier_call = mlx5_sf_mdev_event;
err = mlx5_blocking_notifier_register(dev, &dev->priv.sf_table_mdev_nb);
if (err)
goto mdev_err;
return 0;
mdev_err:
mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb);
vhca_err:
mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb);
return err;
}
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_table *table;
int err;
if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
return 0;
@@ -480,28 +517,18 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev)
table->dev = dev;
xa_init(&table->function_ids);
dev->priv.sf_table = table;
table->esw_nb.notifier_call = mlx5_sf_esw_event;
err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
if (err)
goto reg_err;
table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
if (err)
goto vhca_err;
table->mdev_nb.notifier_call = mlx5_sf_mdev_event;
mlx5_blocking_notifier_register(dev, &table->mdev_nb);
return 0;
}
vhca_err:
mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
reg_err:
mutex_destroy(&table->sf_state_lock);
kfree(table);
dev->priv.sf_table = NULL;
return err;
void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev)
{
if (mlx5_core_is_sf(dev))
return;
mlx5_blocking_notifier_unregister(dev, &dev->priv.sf_table_mdev_nb);
mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb);
mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb);
}
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
@@ -511,9 +538,6 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
if (!table)
return;
mlx5_blocking_notifier_unregister(dev, &table->mdev_nb);
mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
mutex_destroy(&table->sf_state_lock);
WARN_ON(!xa_empty(&table->function_ids));
kfree(table);


@@ -30,9 +30,7 @@ enum mlx5_sf_hwc_index {
};
struct mlx5_sf_hw_table {
struct mlx5_core_dev *dev;
struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
struct notifier_block vhca_nb;
struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX];
};
@@ -71,14 +69,16 @@ mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
return NULL;
}
static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,
static int mlx5_sf_hw_table_id_alloc(struct mlx5_core_dev *dev,
struct mlx5_sf_hw_table *table,
u32 controller,
u32 usr_sfnum)
{
struct mlx5_sf_hwc_table *hwc;
int free_idx = -1;
int i;
hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
hwc = mlx5_sf_controller_to_hwc(dev, controller);
if (!hwc->sfs)
return -ENOSPC;
@@ -100,11 +100,13 @@ static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 control
return free_idx;
}
static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
static void mlx5_sf_hw_table_id_free(struct mlx5_core_dev *dev,
struct mlx5_sf_hw_table *table,
u32 controller, int id)
{
struct mlx5_sf_hwc_table *hwc;
hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
hwc = mlx5_sf_controller_to_hwc(dev, controller);
hwc->sfs[id].allocated = false;
hwc->sfs[id].pending_delete = false;
}
@@ -120,7 +122,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr
return -EOPNOTSUPP;
mutex_lock(&table->table_lock);
sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum);
sw_id = mlx5_sf_hw_table_id_alloc(dev, table, controller, usr_sfnum);
if (sw_id < 0) {
err = sw_id;
goto exist_err;
@@ -151,7 +153,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr
vhca_err:
mlx5_cmd_dealloc_sf(dev, hw_fn_id);
err:
mlx5_sf_hw_table_id_free(table, controller, sw_id);
mlx5_sf_hw_table_id_free(dev, table, controller, sw_id);
exist_err:
mutex_unlock(&table->table_lock);
return err;
@@ -165,7 +167,7 @@ void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
mutex_lock(&table->table_lock);
hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
mlx5_cmd_dealloc_sf(dev, hw_fn_id);
mlx5_sf_hw_table_id_free(table, controller, id);
mlx5_sf_hw_table_id_free(dev, table, controller, id);
mutex_unlock(&table->table_lock);
}
@@ -216,10 +218,12 @@ static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev,
}
}
static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table)
static void mlx5_sf_hw_table_dealloc_all(struct mlx5_core_dev *dev,
struct mlx5_sf_hw_table *table)
{
mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]);
mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
mlx5_sf_hw_table_hwc_dealloc_all(dev,
&table->hwc[MLX5_SF_HWC_EXTERNAL]);
mlx5_sf_hw_table_hwc_dealloc_all(dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
}
static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id)
@@ -301,7 +305,6 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
}
mutex_init(&table->table_lock);
table->dev = dev;
dev->priv.sf_hw_table = table;
base_id = mlx5_sf_start_function_id(dev);
@@ -338,19 +341,22 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
mutex_destroy(&table->table_lock);
kfree(table);
dev->priv.sf_hw_table = NULL;
res_unregister:
mlx5_sf_hw_table_res_unregister(dev);
}
static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
priv.sf_hw_table_vhca_nb);
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_hwc_table *hwc;
struct mlx5_sf_hw *sf_hw;
u16 sw_id;
if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
if (!table || event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
return 0;
hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
@@ -365,20 +371,28 @@ static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode
* Hence recycle the sf hardware id for reuse.
*/
if (sf_hw->allocated && sf_hw->pending_delete)
mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id);
mlx5_sf_hw_table_hwc_sf_free(dev, hwc, sw_id);
mutex_unlock(&table->table_lock);
return 0;
}
int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
if (!table)
if (mlx5_core_is_sf(dev))
return 0;
table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb);
dev->priv.sf_hw_table_vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
return mlx5_vhca_event_notifier_register(dev,
&dev->priv.sf_hw_table_vhca_nb);
}
void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev)
{
if (mlx5_core_is_sf(dev))
return;
mlx5_vhca_event_notifier_unregister(dev,
&dev->priv.sf_hw_table_vhca_nb);
}
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
@@ -388,9 +402,8 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
if (!table)
return;
mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
/* Dealloc SFs whose firmware event has been missed. */
mlx5_sf_hw_table_dealloc_all(table);
mlx5_sf_hw_table_dealloc_all(dev, table);
}
bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)


@@ -12,10 +12,13 @@
int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev);
void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev);
int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev);
void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev);
int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev);
int mlx5_sf_table_init(struct mlx5_core_dev *dev);
void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev);
bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev);
@@ -44,20 +47,33 @@ static inline void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
{
}
static inline int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
static inline int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev)
{
return 0;
}
static inline void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev)
{
}
static inline void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
{
}
static inline int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev)
{
return 0;
}
static inline int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
return 0;
}
static inline void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev)
{
}
static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
}


@@ -9,15 +9,9 @@
#define CREATE_TRACE_POINTS
#include "diag/vhca_tracepoint.h"
struct mlx5_vhca_state_notifier {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
struct blocking_notifier_head n_head;
};
struct mlx5_vhca_event_work {
struct work_struct work;
struct mlx5_vhca_state_notifier *notifier;
struct mlx5_core_dev *dev;
struct mlx5_vhca_state_event event;
};
@@ -95,16 +89,14 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
mlx5_vhca_event_arm(dev, event->function_id);
trace_mlx5_sf_vhca_event(dev, event);
blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
blocking_notifier_call_chain(&dev->priv.vhca_state_n_head, 0, event);
}
static void mlx5_vhca_state_work_handler(struct work_struct *_work)
{
struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work);
struct mlx5_vhca_state_notifier *notifier = work->notifier;
struct mlx5_core_dev *dev = notifier->dev;
mlx5_vhca_event_notify(dev, &work->event);
mlx5_vhca_event_notify(work->dev, &work->event);
kfree(work);
}
@@ -116,8 +108,8 @@ void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct wo
static int
mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
{
struct mlx5_vhca_state_notifier *notifier =
mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb);
struct mlx5_core_dev *dev = mlx5_nb_cof(nb, struct mlx5_core_dev,
priv.vhca_state_nb);
struct mlx5_vhca_event_work *work;
struct mlx5_eqe *eqe = data;
int wq_idx;
@@ -126,10 +118,10 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v
if (!work)
return NOTIFY_DONE;
INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
work->notifier = notifier;
work->dev = dev;
work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS;
mlx5_vhca_events_work_enqueue(notifier->dev, wq_idx, &work->work);
mlx5_vhca_events_work_enqueue(dev, wq_idx, &work->work);
return NOTIFY_OK;
}
@@ -145,9 +137,15 @@ void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_teardown_request, 1);
}
void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev)
{
BLOCKING_INIT_NOTIFIER_HEAD(&dev->priv.vhca_state_n_head);
MLX5_NB_INIT(&dev->priv.vhca_state_nb, mlx5_vhca_state_change_notifier,
VHCA_STATE_CHANGE);
}
int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
struct mlx5_vhca_state_notifier *notifier;
char wq_name[MLX5_CMD_WQ_MAX_NAME];
struct mlx5_vhca_events *events;
int err, i;
@@ -160,7 +158,6 @@ int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
return -ENOMEM;
events->dev = dev;
dev->priv.vhca_events = events;
for (i = 0; i < MLX5_DEV_MAX_WQS; i++) {
snprintf(wq_name, MLX5_CMD_WQ_MAX_NAME, "mlx5_vhca_event%d", i);
events->handler[i].wq = create_singlethread_workqueue(wq_name);
@@ -169,20 +166,10 @@ int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
goto err_create_wq;
}
}
dev->priv.vhca_events = events;
notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
if (!notifier) {
err = -ENOMEM;
goto err_notifier;
}
dev->priv.vhca_state_notifier = notifier;
notifier->dev = dev;
BLOCKING_INIT_NOTIFIER_HEAD(&notifier->n_head);
MLX5_NB_INIT(&notifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE);
return 0;
err_notifier:
err_create_wq:
for (--i; i >= 0; i--)
destroy_workqueue(events->handler[i].wq);
@@ -211,8 +198,6 @@ void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
if (!mlx5_vhca_event_supported(dev))
return;
kfree(dev->priv.vhca_state_notifier);
dev->priv.vhca_state_notifier = NULL;
vhca_events = dev->priv.vhca_events;
for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
destroy_workqueue(vhca_events->handler[i].wq);
@@ -221,34 +206,30 @@ void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
{
struct mlx5_vhca_state_notifier *notifier;
if (!dev->priv.vhca_state_notifier)
if (!mlx5_vhca_event_supported(dev))
return;
notifier = dev->priv.vhca_state_notifier;
mlx5_eq_notifier_register(dev, &notifier->nb);
mlx5_eq_notifier_register(dev, &dev->priv.vhca_state_nb);
}
void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
{
struct mlx5_vhca_state_notifier *notifier;
if (!dev->priv.vhca_state_notifier)
if (!mlx5_vhca_event_supported(dev))
return;
notifier = dev->priv.vhca_state_notifier;
mlx5_eq_notifier_unregister(dev, &notifier->nb);
mlx5_eq_notifier_unregister(dev, &dev->priv.vhca_state_nb);
/* Flush workqueues of all pending events. */
mlx5_vhca_event_work_queues_flush(dev);
}
int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
if (!dev->priv.vhca_state_notifier)
return -EOPNOTSUPP;
return blocking_notifier_chain_register(&dev->priv.vhca_state_notifier->n_head, nb);
return blocking_notifier_chain_register(&dev->priv.vhca_state_n_head,
nb);
}
void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
blocking_notifier_chain_unregister(&dev->priv.vhca_state_notifier->n_head, nb);
blocking_notifier_chain_unregister(&dev->priv.vhca_state_n_head, nb);
}
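
With the head embedded in mlx5_priv and initialized once in
mlx5_vhca_state_notifier_init(), registration no longer depends on a
kzalloc'd notifier object and can no longer fail with -EOPNOTSUPP;
start/stop key off mlx5_vhca_event_supported() instead of a pointer
check. A condensed sketch of a hypothetical consumer (my_cb/my_nb are
not from the diff):

static int my_cb(struct notifier_block *nb, unsigned long ev, void *data)
{
	return NOTIFY_OK;
}

static struct notifier_block my_nb = { .notifier_call = my_cb };

static int my_consumer_init(struct mlx5_core_dev *dev)
{
	/* The chain head always exists now; no support check is
	 * needed before registering.
	 */
	return mlx5_vhca_event_notifier_register(dev, &my_nb);
}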


@@ -18,6 +18,7 @@ static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev)
}
void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap);
void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev);
int mlx5_vhca_event_init(struct mlx5_core_dev *dev);
void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev);
void mlx5_vhca_event_start(struct mlx5_core_dev *dev);
@@ -37,6 +38,10 @@ static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *s
{
}
static inline void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev)
{
}
static inline int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
return 0;


@@ -488,7 +488,6 @@ struct mlx5_devcom_dev;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
struct mlx5_vhca_state_notifier;
struct mlx5_sf_dev_table;
struct mlx5_sf_hw_table;
struct mlx5_sf_table;
@@ -599,6 +598,7 @@ struct mlx5_priv {
struct mlx5_flow_steering *steering;
struct mlx5_mpfs *mpfs;
struct blocking_notifier_head esw_n_head;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
@@ -614,12 +614,18 @@ struct mlx5_priv {
struct mlx5_bfreg_data bfregs;
struct mlx5_sq_bfreg bfreg;
#ifdef CONFIG_MLX5_SF
struct mlx5_vhca_state_notifier *vhca_state_notifier;
struct mlx5_nb vhca_state_nb;
struct blocking_notifier_head vhca_state_n_head;
struct notifier_block sf_dev_nb;
struct mlx5_sf_dev_table *sf_dev_table;
struct mlx5_core_dev *parent_mdev;
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
struct notifier_block sf_hw_table_vhca_nb;
struct mlx5_sf_hw_table *sf_hw_table;
struct notifier_block sf_table_esw_nb;
struct notifier_block sf_table_vhca_nb;
struct notifier_block sf_table_mdev_nb;
struct mlx5_sf_table *sf_table;
#endif
struct blocking_notifier_head lag_nh;