net/mlx5: XDP, Enable TX side XDP multi-buffer support
In XDP scenarios, fragmented packets can occur if the MTU is larger
than the page size, even when the packet size fits within the linear
part. If XDP multi-buffer support is disabled, the fragmented part
won't be handled in the TX flow, leading to packet drops.

Since XDP multi-buffer support is always available, this commit
removes the conditional check for enabling it. This ensures that XDP
multi-buffer support is always enabled, regardless of the `is_xdp_mb`
parameter, and guarantees the handling of fragmented packets in such
scenarios.

Signed-off-by: Alexei Lazar <alazar@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20250209101716.112774-16-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 1a9304859b (parent: 95b9606b15)
committed by: Jakub Kicinski
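As context for the scenario described in the message, the sketch below
(illustrative only, not part of this patch) shows how a TX path can
discover the fragments it must transmit. xdp_frame_num_frags() is a
hypothetical helper name; xdp_frame_has_frags() and
xdp_get_shared_info_from_frame() are existing helpers from
include/net/xdp.h.

#include <net/xdp.h>

/* Hypothetical helper: count the fragments of an XDP frame that a TX
 * path has to emit as extra data segments. Returns 0 when the whole
 * packet fits in the linear part. */
static u32 xdp_frame_num_frags(struct xdp_frame *frame)
{
        struct skb_shared_info *sinfo;

        if (!xdp_frame_has_frags(frame))
                return 0;

        sinfo = xdp_get_shared_info_from_frame(frame);
        return sinfo->nr_frags;
}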
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -384,7 +384,6 @@ enum {
 	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
 	MLX5E_SQ_STATE_PENDING_XSK_TX,
 	MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
-	MLX5E_SQ_STATE_XDP_MULTIBUF,
 	MLX5E_NUM_SQ_STATES, /* Must be kept last */
 };
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -1247,7 +1247,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
 	mlx5e_build_sq_param_common(mdev, param);
 	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
 	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
-	param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
 	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
 }
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -33,7 +33,6 @@ struct mlx5e_sq_param {
 	struct mlx5_wq_param wq;
 	bool is_mpw;
 	bool is_tls;
-	bool is_xdp_mb;
 	u16 stop_room;
 };
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -16,7 +16,6 @@ static const char * const sq_sw_state_type_name[] = {
 	[MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
 	[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
 	[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
-	[MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
 };
 
 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -546,6 +546,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 	bool inline_ok;
 	bool linear;
 	u16 pi;
+	int i;
 
 	struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -612,42 +613,34 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 
 	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
 
-	if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
-		int i;
-
-		memset(&cseg->trailer, 0, sizeof(cseg->trailer));
-		memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
+	memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+	memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
 
-		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+	eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
 
-		for (i = 0; i < num_frags; i++) {
-			skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
-			dma_addr_t addr;
+	for (i = 0; i < num_frags; i++) {
+		skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
+		dma_addr_t addr;
 
-			addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
-				page_pool_get_dma_addr(skb_frag_page(frag)) +
-				skb_frag_off(frag);
+		addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+			page_pool_get_dma_addr(skb_frag_page(frag)) +
+			skb_frag_off(frag);
 
-			dseg->addr = cpu_to_be64(addr);
-			dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
-			dseg->lkey = sq->mkey_be;
-			dseg++;
-		}
+		dseg->addr = cpu_to_be64(addr);
+		dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+		dseg->lkey = sq->mkey_be;
+		dseg++;
+	}
 
-		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
 
-		sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
-			.num_wqebbs = num_wqebbs,
-			.num_pkts = 1,
-		};
+	sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+		.num_wqebbs = num_wqebbs,
+		.num_pkts = 1,
+	};
 
-		sq->pc += num_wqebbs;
-	} else {
-		cseg->fm_ce_se = 0;
-
-		sq->pc++;
-	}
+	sq->pc += num_wqebbs;
 
 	xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
 
 	sq->doorbell_cseg = cseg;
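For orientation, the ds_cnt and num_wqebbs values consumed by the hunk
above are derived from the fragment count earlier in
mlx5e_xmit_xdp_frame(). A rough sketch of that accounting, assuming a
linear data segment is present (the driver's exact expression may
differ slightly):

	/* Sketch only: each fragment costs one data segment (DS);
	 * MLX5_SEND_WQEBB_NUM_DS data segments fit in one WQE basic block. */
	ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1 + num_frags; /* ctrl+eth segs, linear DS, frag DSs */
	if (inline_hdr_sz)
		ds_cnt++; /* inlined packet headers occupy an extra DS */
	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);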
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2023,41 +2023,12 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
 	csp.min_inline_mode = sq->min_inline_mode;
 	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 
-	if (param->is_xdp_mb)
-		set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
-
 	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
 	if (err)
 		goto err_free_xdpsq;
 
 	mlx5e_set_xmit_fp(sq, param->is_mpw);
 
-	if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
-		unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
-		unsigned int inline_hdr_sz = 0;
-		int i;
-
-		if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
-			inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
-			ds_cnt++;
-		}
-
-		/* Pre initialize fixed WQE fields */
-		for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
-			struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
-			struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-			struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
-
-			sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
-				.num_wqebbs = 1,
-				.num_pkts = 1,
-			};
-
-			cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
-			eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
-		}
-	}
-
 	return 0;
 
 err_free_xdpsq:
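The block deleted above was only reachable when the SQ used neither
MPWQE nor multi-buffer: in that mode every XDP WQE carried exactly one
data segment, so qpn_ds and inline_hdr.sz were constants that could be
written once at SQ open. A minimal sketch of why that pre-initialization
no longer applies (names taken from the hunks above):

	/* Before: descriptor count was fixed per ring, so it could be
	 * pre-written into every WQE at SQ-open time. */
	unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1; /* ctrl + eth + one data seg */
	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE)
		ds_cnt++; /* reserve a DS for the inline header */

	/* After: with multi-buffer always on, ds_cnt varies with num_frags,
	 * so mlx5e_xmit_xdp_frame() writes qpn_ds into each WQE as it is posted. */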