gve: Add Rx HWTS metadata to AF_XDP ZC mode

By overlaying struct gve_xdp_buff on top of the struct xdp_buff_xsk that
AF_XDP uses, the driver records the 32-bit timestamp from the completion
descriptor and the cached 64-bit NIC timestamp from gve_priv.
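
As a rough sketch of the overlay, assuming the field names visible in the
diff below (the authoritative definition lives in the driver headers):

  /* Sketch only: layout inferred from this patch, not copied from gve.h. */
  struct gve_xdp_buff {
          struct xdp_buff xdp;    /* must come first, mirroring xdp_buff_xsk */
          /* Everything below must fit in xdp_buff_xsk's 24-byte cb[] area;
           * XSK_CHECK_PRIV_TYPE() enforces this at build time. */
          const struct gve_rx_compl_desc_dqo *compl_desc;
          struct gve_priv *gve;
  };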

The driver's implementation of xmo_rx_timestamp expands the 32-bit
timestamp to the full, up-to-date 64-bit timestamp and returns it to the
user.
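
A hedged sketch of that extension follows; the descriptor timestamp field
and the cached-counter field named below are assumptions for illustration,
not the driver's actual identifiers:

  /* Sketch, not the exact implementation: compl_desc->ts and
   * gve->last_nic_ts are assumed field names. */
  static int gve_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
  {
          const struct gve_xdp_buff *gve_xdp = (const struct gve_xdp_buff *)ctx;
          u32 hwts = le32_to_cpu(gve_xdp->compl_desc->ts);   /* assumed field */
          u64 cached = READ_ONCE(gve_xdp->gve->last_nic_ts); /* assumed field */

          /* Splice the 32 descriptor bits into the cached 64-bit NIC counter,
           * letting signed arithmetic absorb a wrap since the last sync. */
          *timestamp = cached + (s32)(hwts - (u32)cached);
          return 0;
  }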

gve_rx_xsk_dqo is modified to accept a pointer to the completion
descriptor; it no longer takes buf_len explicitly, since the length can be
pulled from the descriptor.

With this patch gve now supports bpf_xdp_metadata_rx_timestamp.
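
For example, an XDP program attached to such an AF_XDP queue could read the
timestamp through the kfunc like this (a sketch; the program name and
printout are illustrative):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* Kfunc named in this commit; implemented per-driver via xmo_rx_timestamp. */
  extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
                                           __u64 *timestamp) __ksym;

  SEC("xdp")
  int rx_hwts(struct xdp_md *ctx)
  {
          __u64 ts;

          /* Returns 0 on success now that gve implements the hook. */
          if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
                  bpf_printk("rx hw ts: %llu", ts);
          return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";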

Signed-off-by: Tim Hostetler <thostet@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Link: https://patch.msgid.link/20251114211146.292068-5-joshwash@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit 1b42e07af1 (parent 66adaf1021)
Author:    Tim Hostetler
Date:      2025-11-14 13:11:46 -08:00
Committer: Paolo Abeni

 2 files changed, 21 insertions(+), 2 deletions(-)

--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c

@@ -2348,6 +2348,10 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
 	xdp_set_features_flag_locked(priv->dev, xdp_features);
 }
 
+static const struct xdp_metadata_ops gve_xdp_metadata_ops = {
+	.xmo_rx_timestamp = gve_xdp_rx_timestamp,
+};
+
 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 {
 	int num_ntfy;
@@ -2443,6 +2447,9 @@ setup_device:
 	}
 
 	gve_set_netdev_xdp_features(priv);
+	if (!gve_is_gqi(priv))
+		priv->dev->xdp_metadata_ops = &gve_xdp_metadata_ops;
+
 	err = gve_setup_device_resources(priv);
 	if (err)
 		goto err_free_xsk_bitmap;

--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c

@@ -240,6 +240,11 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 		rx->rx_headroom = 0;
 	}
 
+	/* struct gve_xdp_buff is overlaid on struct xdp_buff_xsk and utilizes
+	 * the 24 byte field cb to store gve specific data.
+	 */
+	XSK_CHECK_PRIV_TYPE(struct gve_xdp_buff);
+
 	rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
 		gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
 	rx->dqo.buf_states = kvcalloc_node(rx->dqo.num_buf_states,
@@ -701,16 +706,23 @@ err:
 }
 
 static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
-			  struct gve_rx_buf_state_dqo *buf_state, int buf_len,
+			  const struct gve_rx_compl_desc_dqo *compl_desc,
+			  struct gve_rx_buf_state_dqo *buf_state,
 			  struct bpf_prog *xprog)
 {
 	struct xdp_buff *xdp = buf_state->xsk_buff;
+	int buf_len = compl_desc->packet_len;
 	struct gve_priv *priv = rx->gve;
+	struct gve_xdp_buff *gve_xdp;
 	int xdp_act;
 
 	xdp->data_end = xdp->data + buf_len;
 	xsk_buff_dma_sync_for_cpu(xdp);
 
+	gve_xdp = (void *)xdp;
+	gve_xdp->gve = priv;
+	gve_xdp->compl_desc = compl_desc;
+
 	if (xprog) {
 		xdp_act = bpf_prog_run_xdp(xprog, xdp);
 		buf_len = xdp->data_end - xdp->data;
@@ -800,7 +812,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 	xprog = READ_ONCE(priv->xdp_prog);
 	if (buf_state->xsk_buff)
-		return gve_rx_xsk_dqo(napi, rx, buf_state, buf_len, xprog);
+		return gve_rx_xsk_dqo(napi, rx, compl_desc, buf_state, xprog);
 
 	/* Page might have not been used for awhile and was likely last written
 	 * by a different thread.