Mirror of https://github.com/torvalds/linux.git (synced 2025-12-07 20:06:24 +00:00)
RDMA/irdma: Remove doorbell elision logic
In some cases, this logic can result in doorbell writes being
skipped when they should not have been (at least on GEN3 HW),
so remove it. This also means that the mb() can be safely
downgraded to dma_wmb().
Fixes: 551c46edc7 ("RDMA/irdma: Add user/kernel shared libraries")
Signed-off-by: Jacob Moroni <jmoroni@google.com>
Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Link: https://patch.msgid.link/20251125025350.180-9-tatyana.e.nikolova@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
commit 62356fccb1
parent eef3ad030b
committed by Leon Romanovsky
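
For readers skimming the diff below: the elision check being removed only rang the doorbell when the hardware's SQ tail, as reported in the doorbell shadow area, still trailed the newly posted work; otherwise the MMIO write was skipped. The sketch below is a stand-alone user-space model of that skip/ring decision, useful for tabulating which (hw_sq_tail, sw_sq_head, initial_head) combinations suppressed the write. It is not driver code, the helper name and example values are invented for illustration, and it does not model the GEN3 shadow-area timing that makes the elision unsafe.

#include <stdbool.h>
#include <stdio.h>

/*
 * Mirror of the removed predicate: returns true when the old logic
 * would have written the doorbell, false when it elided the write.
 */
static bool old_logic_rings_doorbell(unsigned int hw_sq_tail,
				     unsigned int sw_sq_head,
				     unsigned int initial_head)
{
	if (sw_sq_head == initial_head)
		return false;	/* nothing posted since the last doorbell */
	if (sw_sq_head == hw_sq_tail)
		return false;	/* HW tail already caught up to the new head */
	if (sw_sq_head > initial_head)	/* no wrap since the last doorbell */
		return hw_sq_tail >= initial_head && hw_sq_tail < sw_sq_head;
	/* head wrapped around the ring */
	return hw_sq_tail >= initial_head || hw_sq_tail < sw_sq_head;
}

int main(void)
{
	/* After this patch the driver rings unconditionally instead. */
	printf("hw_tail=4 sw_head=4 initial=2 -> %s\n",
	       old_logic_rings_doorbell(4, 4, 2) ? "ring" : "skip");
	printf("hw_tail=3 sw_head=5 initial=2 -> %s\n",
	       old_logic_rings_doorbell(3, 5, 2) ? "ring" : "skip");
	return 0;
}
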
@@ -685,7 +685,6 @@ static int irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
 	ukqp->rq_size = rsrc->rq_size;
 
 	IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
-	IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
 	IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
 	ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
 
@@ -114,33 +114,8 @@ void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
  */
 void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
 {
-	u64 temp;
-	u32 hw_sq_tail;
-	u32 sw_sq_head;
-
-	/* valid bit is written and loads completed before reading shadow */
-	mb();
-
-	/* read the doorbell shadow area */
-	get_64bit_val(qp->shadow_area, 0, &temp);
-
-	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
-	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
-	if (sw_sq_head != qp->initial_ring.head) {
-		if (sw_sq_head != hw_sq_tail) {
-			if (sw_sq_head > qp->initial_ring.head) {
-				if (hw_sq_tail >= qp->initial_ring.head &&
-				    hw_sq_tail < sw_sq_head)
-					writel(qp->qp_id, qp->wqe_alloc_db);
-			} else {
-				if (hw_sq_tail >= qp->initial_ring.head ||
-				    hw_sq_tail < sw_sq_head)
-					writel(qp->qp_id, qp->wqe_alloc_db);
-			}
-		}
-	}
-
-	qp->initial_ring.head = qp->sq_ring.head;
+	dma_wmb();
+	writel(qp->qp_id, qp->wqe_alloc_db);
 }
 
 /**
@@ -1606,7 +1581,6 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
 	qp->conn_wqes = move_cnt;
 	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
 	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
-	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
 }
 
 /**
@@ -1751,7 +1725,6 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
 	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
 	sq_ring_size = qp->sq_size << info->sq_shift;
 	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
-	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
 	if (info->first_sq_wq) {
 		irdma_setup_connection_wqes(qp, info);
 		qp->swqe_polarity = 1;
@@ -457,7 +457,6 @@ struct irdma_srq_uk {
 	struct irdma_uk_attrs *uk_attrs;
 	__le64 *shadow_area;
 	struct irdma_ring srq_ring;
-	struct irdma_ring initial_ring;
 	u32 srq_id;
 	u32 srq_size;
 	u32 max_srq_frag_cnt;
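
For reference, after this patch irdma_uk_qp_post_wr() in the second hunk above reduces to an unconditional barrier plus doorbell write. Assembled from that hunk's added and surrounding context lines (kernel code, not a stand-alone example; the comment is added here for readability and is not part of the patch):

void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	/* keep WQE/valid-bit stores ordered ahead of the MMIO doorbell */
	dma_wmb();
	writel(qp->qp_id, qp->wqe_alloc_db);
}

With the shadow-area comparison gone, the initial_ring bookkeeping has no remaining user, which is why the other hunks drop its initialization and the struct irdma_srq_uk field.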