net/smc: handle -ENOMEM from smc_wr_alloc_link_mem gracefully

Currently a -ENOMEM from smc_wr_alloc_link_mem() is handled by giving
up and falling back to TCP. This was reasonable while the sizes of the
allocations there were compile-time constants and reasonably small. But
now those sizes are actually configurable.

So instead of giving up, keep retrying with half of the requested size
unless we dip below the old static sizes -- then give up! In terms of
numbers that means we give up when it is certain that we would at best
end up allocating fewer than 16 send WR buffers or fewer than 48 recv
WR buffers. This is to avoid regressions due to having fewer buffers
compared to the static values of the past.

Please note that SMC-R is supposed to be an optimisation over TCP, and
falling back to TCP is superior to establishing an SMC connection that
is going to perform worse. If the memory allocation fails (and we
propagate -ENOMEM), we fall back to TCP.

Preserve (modulo truncation) the ratio of send/recv WR buffer counts.
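
For illustration, here is a minimal userspace model of the retry policy
described above. The names and the failing-allocation stub are made up;
the real logic lives in smcr_link_init() (see the diff below).

#include <stdio.h>

#define OLD_SEND_WR_CNT 16	/* old compile-time send WR count */
#define OLD_RECV_WR_CNT 48	/* old compile-time recv WR count */

/* Stand-in for smc_wr_alloc_link_mem(): pretend memory is tight and
 * only requests of at most 64 send WR buffers succeed.
 */
static int fake_alloc(unsigned int send_wr, unsigned int recv_wr)
{
	(void)recv_wr;
	return send_wr > 64 ? -1 : 0;	/* -1 models -ENOMEM */
}

int main(void)
{
	unsigned int send_wr = 256, recv_wr = 768;	/* configured maxima */

	while (fake_alloc(send_wr, recv_wr)) {
		/* halving both preserves their ratio modulo truncation */
		send_wr /= 2;
		recv_wr /= 2;
		if (send_wr < OLD_SEND_WR_CNT || recv_wr < OLD_RECV_WR_CNT) {
			puts("give up -> fall back to TCP");
			return 1;
		}
	}
	printf("got %u send / %u recv WR buffers\n", send_wr, recv_wr);
	return 0;
}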

Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
Reviewed-by: Wenjia Zhang <wenjia@linux.ibm.com>
Reviewed-by: Mahanta Jambigi <mjambigi@linux.ibm.com>
Reviewed-by: Sidraya Jayagond <sidraya@linux.ibm.com>
Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
Tested-by: Mahanta Jambigi <mjambigi@linux.ibm.com>
Link: https://patch.msgid.link/20251027224856.2970019-3-pasic@linux.ibm.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

diff --git a/Documentation/networking/smc-sysctl.rst b/Documentation/networking/smc-sysctl.rst

@@ -85,7 +85,9 @@ smcr_max_send_wr - INTEGER
 	Please be aware that all the buffers need to be allocated as a physically
 	continuous array in which each element is a single buffer and has the size
-	of SMC_WR_BUF_SIZE (48) bytes. If the allocation fails we give up much
+	of SMC_WR_BUF_SIZE (48) bytes. If the allocation fails, we keep retrying
+	with half of the buffer count until it is either successful or (unlikely)
+	we dip below the old hard coded value which is 16 where we give up much
 	like before having this control.
 
 	Default: 16
@@ -103,7 +105,9 @@ smcr_max_recv_wr - INTEGER
 	Please be aware that all the buffers need to be allocated as a physically
 	continuous array in which each element is a single buffer and has the size
-	of SMC_WR_BUF_SIZE (48) bytes. If the allocation fails we give up much
+	of SMC_WR_BUF_SIZE (48) bytes. If the allocation fails, we keep retrying
+	with half of the buffer count until it is either successful or (unlikely)
+	we dip below the old hard coded value which is 48 where we give up much
 	like before having this control.
 
 	Default: 48
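
To see why this contiguous allocation can fail for large values, here is
a rough, illustrative calculation of the send-buffer array size
(SMC_WR_BUF_SIZE and the page size are taken as 48 bytes and 4 KiB per
the text above; the WR counts are arbitrary examples):

#include <stdio.h>

#define SMC_WR_BUF_SIZE 48	/* bytes per send WR buffer */
#define PAGE_SIZE 4096		/* typical page size, for illustration */

int main(void)
{
	for (unsigned int wr = 16; wr <= 4096; wr *= 4) {
		unsigned int bytes = wr * SMC_WR_BUF_SIZE;

		printf("%4u send WRs -> %6u contiguous bytes (%2u pages)\n",
		       wr, bytes, (bytes + PAGE_SIZE - 1) / PAGE_SIZE);
	}
	return 0;
}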

diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c

@@ -810,6 +810,8 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
 	lnk->clearing = 0;
 	lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu;
 	lnk->link_id = smcr_next_link_id(lgr);
+	lnk->max_send_wr = lgr->max_send_wr;
+	lnk->max_recv_wr = lgr->max_recv_wr;
 	lnk->lgr = lgr;
 	smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */
 	lnk->link_idx = link_idx;
@@ -836,27 +838,39 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
 	rc = smc_llc_link_init(lnk);
 	if (rc)
 		goto out;
-	rc = smc_wr_alloc_link_mem(lnk);
-	if (rc)
-		goto clear_llc_lnk;
 	rc = smc_ib_create_protection_domain(lnk);
 	if (rc)
-		goto free_link_mem;
-	rc = smc_ib_create_queue_pair(lnk);
-	if (rc)
-		goto dealloc_pd;
+		goto clear_llc_lnk;
+	do {
+		rc = smc_ib_create_queue_pair(lnk);
+		if (rc)
+			goto dealloc_pd;
+		rc = smc_wr_alloc_link_mem(lnk);
+		if (!rc)
+			break;
+		else if (rc != -ENOMEM) /* give up */
+			goto destroy_qp;
+		/* retry with smaller ... */
+		lnk->max_send_wr /= 2;
+		lnk->max_recv_wr /= 2;
+		/* ... unless dropping below the old SMC_WR_BUF_CNT */
+		if (lnk->max_send_wr < 16 || lnk->max_recv_wr < 48)
+			goto destroy_qp;
+		smc_ib_destroy_queue_pair(lnk);
+	} while (1);
 	rc = smc_wr_create_link(lnk);
 	if (rc)
-		goto destroy_qp;
+		goto free_link_mem;
 	lnk->state = SMC_LNK_ACTIVATING;
 	return 0;
 
+free_link_mem:
+	smc_wr_free_link_mem(lnk);
 destroy_qp:
 	smc_ib_destroy_queue_pair(lnk);
 dealloc_pd:
 	smc_ib_dealloc_protection_domain(lnk);
-free_link_mem:
-	smc_wr_free_link_mem(lnk);
 clear_llc_lnk:
 	smc_llc_link_clear(lnk, false);
 out:
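
The cleanup labels above are ordered so that each failure point jumps to
the first cleanup it needs and falls through the rest; since the WR
memory is now allocated after the QP exists, its free label moves above
destroy_qp. A generic, self-contained sketch of this unwind idiom (the
resources here are plain heap buffers, not SMC objects):

#include <stdlib.h>

/* Three hypothetical resources acquired in order; a failure at any step
 * unwinds only what is already held, falling through the labels below
 * its entry point -- the same ladder shape as in smcr_link_init().
 */
static int setup(void **a, void **b, void **c)
{
	*a = malloc(16);
	if (!*a)
		goto out;
	*b = malloc(16);
	if (!*b)
		goto free_a;
	*c = malloc(16);
	if (!*c)
		goto free_b;
	return 0;

free_b:
	free(*b);	/* falls through ... */
free_a:
	free(*a);	/* ... to release earlier resources too */
out:
	return -1;
}

int main(void)
{
	void *a, *b, *c;

	if (setup(&a, &b, &c))
		return 1;
	free(c);
	free(b);
	free(a);
	return 0;
}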

diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h

@@ -175,6 +175,8 @@ struct smc_link {
 	struct completion	llc_testlink_resp; /* wait for rx of testlink */
 	int			llc_testlink_time; /* testlink interval */
 	atomic_t		conn_cnt; /* connections on this link */
+	u16			max_send_wr;
+	u16			max_recv_wr;
 };
 
 /* For now we just allow one parallel link per link group. The SMC protocol

diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c

@@ -547,9 +547,9 @@ void smc_wr_remember_qp_attr(struct smc_link *lnk)
 			       IB_QP_DEST_QPN,
 			       &init_attr);
-	lnk->wr_tx_cnt = min_t(size_t, lnk->lgr->max_send_wr,
+	lnk->wr_tx_cnt = min_t(size_t, lnk->max_send_wr,
 			       lnk->qp_attr.cap.max_send_wr);
-	lnk->wr_rx_cnt = min_t(size_t, lnk->lgr->max_recv_wr,
+	lnk->wr_rx_cnt = min_t(size_t, lnk->max_recv_wr,
 			       lnk->qp_attr.cap.max_recv_wr);
 }
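
For a hedged illustration of the clamp above: the effective ring size is
the smaller of the link's (possibly halved) request and what the created
QP actually supports. min_t below is a simplified userspace stand-in for
the kernel macro, and the numbers are invented:

#include <stdio.h>

/* simplified stand-in for the kernel's min_t(); note it evaluates its
 * arguments twice, which this sketch does not care about
 */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned short max_send_wr = 128;	/* link's requested count */
	unsigned int cap_max_send_wr = 96;	/* what the QP was granted */
	size_t wr_tx_cnt = min_t(size_t, max_send_wr, cap_max_send_wr);

	printf("effective send WR count: %zu\n", wr_tx_cnt);	/* 96 */
	return 0;
}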
@@ -741,51 +741,51 @@ int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr)
 int smc_wr_alloc_link_mem(struct smc_link *link)
 {
 	/* allocate link related memory */
-	link->wr_tx_bufs = kcalloc(link->lgr->max_send_wr,
+	link->wr_tx_bufs = kcalloc(link->max_send_wr,
 				   SMC_WR_BUF_SIZE, GFP_KERNEL);
 	if (!link->wr_tx_bufs)
 		goto no_mem;
-	link->wr_rx_bufs = kcalloc(link->lgr->max_recv_wr, link->wr_rx_buflen,
+	link->wr_rx_bufs = kcalloc(link->max_recv_wr, link->wr_rx_buflen,
 				   GFP_KERNEL);
 	if (!link->wr_rx_bufs)
 		goto no_mem_wr_tx_bufs;
-	link->wr_tx_ibs = kcalloc(link->lgr->max_send_wr,
+	link->wr_tx_ibs = kcalloc(link->max_send_wr,
 				  sizeof(link->wr_tx_ibs[0]), GFP_KERNEL);
 	if (!link->wr_tx_ibs)
 		goto no_mem_wr_rx_bufs;
-	link->wr_rx_ibs = kcalloc(link->lgr->max_recv_wr,
+	link->wr_rx_ibs = kcalloc(link->max_recv_wr,
 				  sizeof(link->wr_rx_ibs[0]),
 				  GFP_KERNEL);
 	if (!link->wr_rx_ibs)
 		goto no_mem_wr_tx_ibs;
-	link->wr_tx_rdmas = kcalloc(link->lgr->max_send_wr,
+	link->wr_tx_rdmas = kcalloc(link->max_send_wr,
 				    sizeof(link->wr_tx_rdmas[0]),
 				    GFP_KERNEL);
 	if (!link->wr_tx_rdmas)
 		goto no_mem_wr_rx_ibs;
-	link->wr_tx_rdma_sges = kcalloc(link->lgr->max_send_wr,
+	link->wr_tx_rdma_sges = kcalloc(link->max_send_wr,
 					sizeof(link->wr_tx_rdma_sges[0]),
 					GFP_KERNEL);
 	if (!link->wr_tx_rdma_sges)
 		goto no_mem_wr_tx_rdmas;
-	link->wr_tx_sges = kcalloc(link->lgr->max_send_wr, sizeof(link->wr_tx_sges[0]),
+	link->wr_tx_sges = kcalloc(link->max_send_wr, sizeof(link->wr_tx_sges[0]),
 				   GFP_KERNEL);
 	if (!link->wr_tx_sges)
 		goto no_mem_wr_tx_rdma_sges;
-	link->wr_rx_sges = kcalloc(link->lgr->max_recv_wr,
+	link->wr_rx_sges = kcalloc(link->max_recv_wr,
 				   sizeof(link->wr_rx_sges[0]) * link->wr_rx_sge_cnt,
 				   GFP_KERNEL);
 	if (!link->wr_rx_sges)
 		goto no_mem_wr_tx_sges;
-	link->wr_tx_mask = bitmap_zalloc(link->lgr->max_send_wr, GFP_KERNEL);
+	link->wr_tx_mask = bitmap_zalloc(link->max_send_wr, GFP_KERNEL);
 	if (!link->wr_tx_mask)
 		goto no_mem_wr_rx_sges;
-	link->wr_tx_pends = kcalloc(link->lgr->max_send_wr,
+	link->wr_tx_pends = kcalloc(link->max_send_wr,
 				    sizeof(link->wr_tx_pends[0]),
 				    GFP_KERNEL);
 	if (!link->wr_tx_pends)
 		goto no_mem_wr_tx_mask;
-	link->wr_tx_compl = kcalloc(link->lgr->max_send_wr,
+	link->wr_tx_compl = kcalloc(link->max_send_wr,
 				    sizeof(link->wr_tx_compl[0]),
 				    GFP_KERNEL);
 	if (!link->wr_tx_compl)
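
Worth noting is the contract this function keeps with the retry loop in
smcr_link_init(): on failure it unwinds its partial allocations via the
no_mem_* labels and returns -ENOMEM, so the caller can halve the counts
and simply call it again. A minimal sketch of that contract, with
made-up structure and sizes:

#include <errno.h>
#include <stdlib.h>

struct bufs {
	void *tx;
	void *rx;
};

static int alloc_bufs(struct bufs *b, size_t tx_cnt, size_t rx_cnt)
{
	b->tx = calloc(tx_cnt, 48);	/* cf. kcalloc(max_send_wr, ...) */
	if (!b->tx)
		goto no_mem;
	b->rx = calloc(rx_cnt, 4096);	/* cf. kcalloc(max_recv_wr, ...) */
	if (!b->rx)
		goto no_mem_tx;
	return 0;

no_mem_tx:
	free(b->tx);	/* unwind the partial allocation ... */
no_mem:
	return -ENOMEM;	/* ... and let the caller retry with smaller counts */
}

int main(void)
{
	struct bufs b;
	size_t tx = 256, rx = 768;

	while (alloc_bufs(&b, tx, rx) == -ENOMEM) {
		tx /= 2;
		rx /= 2;
		if (tx < 16 || rx < 48)
			return 1;	/* give up, as smcr_link_init() does */
	}
	free(b.rx);
	free(b.tx);
	return 0;
}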
@@ -906,7 +906,7 @@ int smc_wr_create_link(struct smc_link *lnk)
 		goto dma_unmap;
 	}
 	smc_wr_init_sge(lnk);
-	bitmap_zero(lnk->wr_tx_mask, lnk->lgr->max_send_wr);
+	bitmap_zero(lnk->wr_tx_mask, lnk->max_send_wr);
 	init_waitqueue_head(&lnk->wr_tx_wait);
 	rc = percpu_ref_init(&lnk->wr_tx_refs, smcr_wr_tx_refs_free, 0, GFP_KERNEL);
 	if (rc)