Mirror of https://github.com/torvalds/linux.git
net/mlx5: Use newer affinity descriptor
Use the more refined struct irq_affinity_desc to describe the required IRQ
affinity. For the async IRQs request unmanaged affinity and for completion
queues use managed affinity.

No functionality changes introduced. It will be used in a subsequent patch
when we use dynamic MSIX allocation.

Signed-off-by: Eli Cohen <elic@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
commit bbac70c741
parent 235a25fe28
committed by Saeed Mahameed
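For reference, struct irq_affinity_desc (declared in include/linux/interrupt.h) bundles an embedded cpumask with an is_managed flag, which is why the separate cpumask_var_t allocations in the diff below can be dropped. The snippet that follows is a minimal illustrative sketch, not part of the patch; the helper name fill_af_desc is hypothetical and only shows how a caller might populate the descriptor before handing it to the mlx5 request helpers.

#include <linux/cpumask.h>
#include <linux/interrupt.h>	/* struct irq_affinity_desc */

/* Hypothetical helper (illustration only): pin an IRQ to one CPU.
 * is_managed = true requests managed affinity (completion queues);
 * false leaves it unmanaged (async IRQs), per the commit message.
 */
static void fill_af_desc(struct irq_affinity_desc *af_desc,
			 unsigned int cpu, bool managed)
{
	cpumask_clear(&af_desc->mask);		/* mask is embedded; no zalloc_cpumask_var() needed */
	cpumask_set_cpu(cpu, &af_desc->mask);
	af_desc->is_managed = managed;
}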
@@ -45,30 +45,27 @@ static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
 
 /* Creating an IRQ from irq_pool */
 static struct mlx5_irq *
-irq_pool_request_irq(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
 {
-	cpumask_var_t auto_mask;
-	struct mlx5_irq *irq;
+	struct irq_affinity_desc auto_desc = {};
 	u32 irq_index;
 	int err;
 
-	if (!zalloc_cpumask_var(&auto_mask, GFP_KERNEL))
-		return ERR_PTR(-ENOMEM);
 	err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
 	if (err)
 		return ERR_PTR(err);
 	if (pool->irqs_per_cpu) {
-		if (cpumask_weight(req_mask) > 1)
+		if (cpumask_weight(&af_desc->mask) > 1)
 			/* if req_mask contain more then one CPU, set the least loadad CPU
 			 * of req_mask
 			 */
-			cpumask_set_cpu(cpu_get_least_loaded(pool, req_mask), auto_mask);
+			cpumask_set_cpu(cpu_get_least_loaded(pool, &af_desc->mask),
+					&auto_desc.mask);
 		else
-			cpu_get(pool, cpumask_first(req_mask));
+			cpu_get(pool, cpumask_first(&af_desc->mask));
 	}
-	irq = mlx5_irq_alloc(pool, irq_index, cpumask_empty(auto_mask) ? req_mask : auto_mask);
-	free_cpumask_var(auto_mask);
-	return irq;
+	return mlx5_irq_alloc(pool, irq_index,
+			      cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc);
 }
 
 /* Looking for the IRQ with the smallest refcount that fits req_mask.
@@ -115,22 +112,22 @@ irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req
 /**
  * mlx5_irq_affinity_request - request an IRQ according to the given mask.
  * @pool: IRQ pool to request from.
- * @req_mask: cpumask requested for this IRQ.
+ * @af_desc: affinity descriptor for this IRQ.
  *
  * This function returns a pointer to IRQ, or ERR_PTR in case of error.
  */
 struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
 {
 	struct mlx5_irq *least_loaded_irq, *new_irq;
 
 	mutex_lock(&pool->lock);
-	least_loaded_irq = irq_pool_find_least_loaded(pool, req_mask);
+	least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask);
 	if (least_loaded_irq &&
 	    mlx5_irq_read_locked(least_loaded_irq) < pool->min_threshold)
 		goto out;
 	/* We didn't find an IRQ with less than min_thres, try to allocate a new IRQ */
-	new_irq = irq_pool_request_irq(pool, req_mask);
+	new_irq = irq_pool_request_irq(pool, af_desc);
 	if (IS_ERR(new_irq)) {
 		if (!least_loaded_irq) {
 			/* We failed to create an IRQ and we didn't find an IRQ */
@@ -194,16 +191,15 @@ int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
 					struct mlx5_irq **irqs)
 {
 	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
-	cpumask_var_t req_mask;
+	struct irq_affinity_desc af_desc = {};
 	struct mlx5_irq *irq;
 	int i = 0;
 
-	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
-		return -ENOMEM;
-	cpumask_copy(req_mask, cpu_online_mask);
+	af_desc.is_managed = 1;
+	cpumask_copy(&af_desc.mask, cpu_online_mask);
 	for (i = 0; i < nirqs; i++) {
 		if (mlx5_irq_pool_is_sf_pool(pool))
-			irq = mlx5_irq_affinity_request(pool, req_mask);
+			irq = mlx5_irq_affinity_request(pool, &af_desc);
 		else
 			/* In case SF pool doesn't exists, fallback to the PF IRQs.
 			 * The PF IRQs are already allocated and binded to CPU
@@ -213,13 +209,12 @@ int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
 		if (IS_ERR(irq))
 			break;
 		irqs[i] = irq;
-		cpumask_clear_cpu(cpumask_first(mlx5_irq_get_affinity_mask(irq)), req_mask);
+		cpumask_clear_cpu(cpumask_first(mlx5_irq_get_affinity_mask(irq)), &af_desc.mask);
 		mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
 			      pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
 			      cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
 			      mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
 	}
-	free_cpumask_var(req_mask);
 	if (!i)
 		return PTR_ERR(irq);
 	return i;
@@ -25,7 +25,7 @@ int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
 struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev);
 void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
 struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
-				  struct cpumask *affinity);
+				  struct irq_affinity_desc *af_desc);
 int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
 			      struct mlx5_irq **irqs);
 void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs);
@@ -39,7 +39,7 @@ struct mlx5_irq_pool;
 int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
 					struct mlx5_irq **irqs);
 struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool,
-					   const struct cpumask *req_mask);
+					   struct irq_affinity_desc *af_desc);
 void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
 				    int num_irqs);
 #else
@@ -50,7 +50,7 @@ static inline int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev,
 }
 
 static inline struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
@@ -206,7 +206,7 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 }
 
 struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
-				const struct cpumask *affinity)
+				struct irq_affinity_desc *af_desc)
 {
 	struct mlx5_core_dev *dev = pool->dev;
 	char name[MLX5_MAX_IRQ_NAME];
@@ -235,8 +235,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 		err = -ENOMEM;
 		goto err_cpumask;
 	}
-	if (affinity) {
-		cpumask_copy(irq->mask, affinity);
+	if (af_desc) {
+		cpumask_copy(irq->mask, &af_desc->mask);
 		irq_set_affinity_and_hint(irq->map.virq, irq->mask);
 	}
 	irq->pool = pool;
@@ -250,7 +250,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	}
 	return irq;
 err_xa:
-	irq_update_affinity_hint(irq->map.virq, NULL);
+	if (af_desc)
+		irq_update_affinity_hint(irq->map.virq, NULL);
 	free_cpumask_var(irq->mask);
 err_cpumask:
 	free_irq(irq->map.virq, &irq->nh);
@@ -299,7 +300,7 @@ int mlx5_irq_get_index(struct mlx5_irq *irq)
 /* requesting an irq from a given pool according to given index */
 static struct mlx5_irq *
 irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
-			struct cpumask *affinity)
+			struct irq_affinity_desc *af_desc)
 {
 	struct mlx5_irq *irq;
 
@@ -309,7 +310,7 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
 		mlx5_irq_get_locked(irq);
 		goto unlock;
 	}
-	irq = mlx5_irq_alloc(pool, vecidx, affinity);
+	irq = mlx5_irq_alloc(pool, vecidx, af_desc);
 unlock:
 	mutex_unlock(&pool->lock);
 	return irq;
@@ -386,28 +387,26 @@ void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
 struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
 {
 	struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
-	cpumask_var_t req_mask;
+	struct irq_affinity_desc af_desc;
 	struct mlx5_irq *irq;
 
-	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
-		return ERR_PTR(-ENOMEM);
-	cpumask_copy(req_mask, cpu_online_mask);
+	cpumask_copy(&af_desc.mask, cpu_online_mask);
+	af_desc.is_managed = false;
 	if (!mlx5_irq_pool_is_sf_pool(pool)) {
 		/* In case we are allocating a control IRQ from a pci device's pool.
 		 * This can happen also for a SF if the SFs pool is empty.
 		 */
 		if (!pool->xa_num_irqs.max) {
-			cpumask_clear(req_mask);
+			cpumask_clear(&af_desc.mask);
 			/* In case we only have a single IRQ for PF/VF */
-			cpumask_set_cpu(cpumask_first(cpu_online_mask), req_mask);
+			cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc.mask);
 		}
 		/* Allocate the IRQ in the last index of the pool */
-		irq = irq_pool_request_vector(pool, pool->xa_num_irqs.max, req_mask);
+		irq = irq_pool_request_vector(pool, pool->xa_num_irqs.max, &af_desc);
 	} else {
-		irq = mlx5_irq_affinity_request(pool, req_mask);
+		irq = mlx5_irq_affinity_request(pool, &af_desc);
 	}
 
-	free_cpumask_var(req_mask);
 	return irq;
 }
 
@@ -416,23 +415,23 @@ struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
  * @dev: mlx5 device that requesting the IRQ.
  * @vecidx: vector index of the IRQ. This argument is ignore if affinity is
  * provided.
- * @affinity: cpumask requested for this IRQ.
+ * @af_desc: affinity descriptor for this IRQ.
 *
 * This function returns a pointer to IRQ, or ERR_PTR in case of error.
 */
 struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
-				  struct cpumask *affinity)
+				  struct irq_affinity_desc *af_desc)
 {
 	struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
 	struct mlx5_irq_pool *pool;
 	struct mlx5_irq *irq;
 
 	pool = irq_table->pf_pool;
-	irq = irq_pool_request_vector(pool, vecidx, affinity);
+	irq = irq_pool_request_vector(pool, vecidx, af_desc);
 	if (IS_ERR(irq))
 		return irq;
 	mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
-		      irq->map.virq, cpumask_pr_args(affinity),
+		      irq->map.virq, cpumask_pr_args(&af_desc->mask),
 		      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
 	return irq;
 }
@@ -463,22 +462,20 @@ void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs)
 int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
 			      struct mlx5_irq **irqs)
 {
-	cpumask_var_t req_mask;
+	struct irq_affinity_desc af_desc;
 	struct mlx5_irq *irq;
 	int i;
 
-	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
-		return -ENOMEM;
+	af_desc.is_managed = 1;
 	for (i = 0; i < nirqs; i++) {
-		cpumask_set_cpu(cpus[i], req_mask);
-		irq = mlx5_irq_request(dev, i, req_mask);
+		cpumask_set_cpu(cpus[i], &af_desc.mask);
+		irq = mlx5_irq_request(dev, i, &af_desc);
 		if (IS_ERR(irq))
 			break;
-		cpumask_clear(req_mask);
+		cpumask_clear(&af_desc.mask);
 		irqs[i] = irq;
 	}
 
-	free_cpumask_var(req_mask);
 	return i ? i : PTR_ERR(irq);
 }
 
@@ -31,7 +31,7 @@ static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
 }
 
 struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
-				const struct cpumask *affinity);
+				struct irq_affinity_desc *af_desc);
 int mlx5_irq_get_locked(struct mlx5_irq *irq);
 int mlx5_irq_read_locked(struct mlx5_irq *irq);
 int mlx5_irq_put(struct mlx5_irq *irq);