eth: fbnic: Collect packet statistics for XDP
Add support for XDP statistics collection and reporting via the rtnl_link and
netdev_queue APIs. For XDP programs without frags support, fbnic requires the
MTU to be less than the HDS threshold. If an oversized frame is received, the
frame is dropped and recorded as rx_length_errors, reported via ip stats to
highlight that this is an error.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Mohsin Bashir <mohsin.bashr@gmail.com>
Link: https://patch.msgid.link/20250813221319.3367670-9-mohsin.bashr@gmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Committed by: Paolo Abeni
Parent: 168deb7b31
Commit: 5213ff0863
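
The counters added by this patch are kept per queue and read with the kernel's
u64_stats seqcount helpers, the same writer/reader pattern that appears
throughout the hunks below. The following is a minimal sketch of that pattern
using the generic <linux/u64_stats_sync.h> API; the example_queue_stats
structure and function names are made up for illustration and merely stand in
for struct fbnic_queue_stats:

    #include <linux/types.h>
    #include <linux/u64_stats_sync.h>

    /* Hypothetical stand-in for a driver's per-queue statistics.
     * The syncp member must be initialised once with u64_stats_init().
     */
    struct example_queue_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
    };

    /* Writer side (datapath): bracket updates so the 64-bit counters
     * cannot be torn on 32-bit architectures.
     */
    static void example_record(struct example_queue_stats *stats, u32 len)
    {
        u64_stats_update_begin(&stats->syncp);
        stats->packets++;
        stats->bytes += len;
        u64_stats_update_end(&stats->syncp);
    }

    /* Reader side (e.g. .ndo_get_stats64 or the netdev_queue stats
     * callbacks): retry until a consistent snapshot is observed, exactly
     * like the loops in fbnic_get_stats64() and fbnic_get_queue_stats_tx()
     * further down.
     */
    static void example_read(struct example_queue_stats *stats,
                             u64 *packets, u64 *bytes)
    {
        unsigned int start;

        do {
            start = u64_stats_fetch_begin(&stats->syncp);
            *packets = stats->packets;
            *bytes = stats->bytes;
        } while (u64_stats_fetch_retry(&stats->syncp, start));
    }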
@@ -160,3 +160,14 @@ behavior and potential performance bottlenecks.
  credit exhaustion
- ``pcie_ob_rd_no_np_cred``: Read requests dropped due to non-posted
  credit exhaustion

XDP Length Error:
~~~~~~~~~~~~~~~~~

For XDP programs without frags support, fbnic tries to make sure that MTU fits
into a single buffer. If an oversized frame is received and gets fragmented,
it is dropped and the following netlink counters are updated

- ``rx-length``: number of frames dropped due to lack of fragmentation
  support in the attached XDP program
- ``rx-errors``: total number of packets with errors received on the interface
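
The behaviour these counters document boils down to a single check on the
received frame. Here is a minimal sketch, not fbnic's exact code: the helper
name and the prog_has_frags flag are made up, while xdp_buff_has_frags() is
the generic helper from <net/xdp.h>:

    #include <net/xdp.h>

    /* Frame spilled into multiple buffers but the attached XDP program
     * was loaded without frags support: the caller drops it and bumps the
     * per-queue length_errors counter, which surfaces as rx-length in the
     * queue netlink stats and rx_length_errors in the rtnl_link stats.
     */
    static bool example_xdp_len_err(struct xdp_buff *xdp, bool prog_has_frags)
    {
        return xdp_buff_has_frags(xdp) && !prog_has_frags;
    }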
@@ -407,11 +407,12 @@ static void fbnic_get_stats64(struct net_device *dev,
                              struct rtnl_link_stats64 *stats64)
{
    u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
    u64 rx_over = 0, rx_missed = 0, rx_length = 0;
    u64 tx_bytes, tx_packets, tx_dropped = 0;
    struct fbnic_net *fbn = netdev_priv(dev);
    struct fbnic_dev *fbd = fbn->fbd;
    struct fbnic_queue_stats *stats;
    u64 rx_over = 0, rx_missed = 0;

    unsigned int start, i;

    fbnic_get_hw_stats(fbd);
@@ -489,6 +490,7 @@ static void fbnic_get_stats64(struct net_device *dev,
    stats64->rx_missed_errors = rx_missed;

    for (i = 0; i < fbn->num_rx_queues; i++) {
        struct fbnic_ring *xdpr = fbn->tx[FBNIC_MAX_TXQS + i];
        struct fbnic_ring *rxr = fbn->rx[i];

        if (!rxr)
@@ -500,11 +502,29 @@ static void fbnic_get_stats64(struct net_device *dev,
            rx_bytes = stats->bytes;
            rx_packets = stats->packets;
            rx_dropped = stats->dropped;
            rx_length = stats->rx.length_errors;
        } while (u64_stats_fetch_retry(&stats->syncp, start));

        stats64->rx_bytes += rx_bytes;
        stats64->rx_packets += rx_packets;
        stats64->rx_dropped += rx_dropped;
        stats64->rx_errors += rx_length;
        stats64->rx_length_errors += rx_length;

        if (!xdpr)
            continue;

        stats = &xdpr->stats;
        do {
            start = u64_stats_fetch_begin(&stats->syncp);
            tx_bytes = stats->bytes;
            tx_packets = stats->packets;
            tx_dropped = stats->dropped;
        } while (u64_stats_fetch_retry(&stats->syncp, start));

        stats64->tx_bytes += tx_bytes;
        stats64->tx_packets += tx_packets;
        stats64->tx_dropped += tx_dropped;
    }
}
@@ -603,6 +623,7 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
    struct fbnic_ring *txr = fbn->tx[idx];
    struct fbnic_queue_stats *stats;
    u64 stop, wake, csum, lso;
    struct fbnic_ring *xdpr;
    unsigned int start;
    u64 bytes, packets;
@@ -626,6 +647,19 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
    tx->hw_gso_wire_packets = lso;
    tx->stop = stop;
    tx->wake = wake;

    xdpr = fbn->tx[FBNIC_MAX_TXQS + idx];
    if (xdpr) {
        stats = &xdpr->stats;
        do {
            start = u64_stats_fetch_begin(&stats->syncp);
            bytes = stats->bytes;
            packets = stats->packets;
        } while (u64_stats_fetch_retry(&stats->syncp, start));

        tx->bytes += bytes;
        tx->packets += packets;
    }
}

static void fbnic_get_base_stats(struct net_device *dev,
@@ -620,8 +620,8 @@ static void fbnic_clean_twq1(struct fbnic_napi_vector *nv, bool pp_allow_direct,
                             struct fbnic_ring *ring, bool discard,
                             unsigned int hw_head)
{
    u64 total_bytes = 0, total_packets = 0;
    unsigned int head = ring->head;
    u64 total_bytes = 0;

    while (hw_head != head) {
        struct page *page;
@@ -633,6 +633,11 @@ static void fbnic_clean_twq1(struct fbnic_napi_vector *nv, bool pp_allow_direct,
        twd = le64_to_cpu(ring->desc[head]);
        page = ring->tx_buf[head];

        /* TYPE_AL is 2, TYPE_LAST_AL is 3. So this trick gives
         * us one increment per packet, with no branches.
         */
        total_packets += FIELD_GET(FBNIC_TWD_TYPE_MASK, twd) -
                         FBNIC_TWD_TYPE_AL;
        total_bytes += FIELD_GET(FBNIC_TWD_LEN_MASK, twd);

        page_pool_put_page(nv->page_pool, page, -1, pp_allow_direct);
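
The comment in this hunk is worth spelling out. With the descriptor type
values it quotes (FBNIC_TWD_TYPE_AL == 2 for a non-final descriptor,
FBNIC_TWD_TYPE_LAST_AL == 3 for the final descriptor of a packet), the
subtraction counts packets without a branch:

    /* Each completed descriptor contributes (type - TYPE_AL) packets:
     *   TYPE_AL      (2):  2 - 2 = 0   middle descriptor, no increment
     *   TYPE_LAST_AL (3):  3 - 2 = 1   final descriptor, one packet
     * A packet spanning three descriptors therefore adds 0 + 0 + 1 == 1
     * to total_packets, with no conditional in the cleanup loop.
     */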
@@ -645,6 +650,18 @@ next_desc:
        return;

    ring->head = head;

    if (discard) {
        u64_stats_update_begin(&ring->stats.syncp);
        ring->stats.dropped += total_packets;
        u64_stats_update_end(&ring->stats.syncp);
        return;
    }

    u64_stats_update_begin(&ring->stats.syncp);
    ring->stats.bytes += total_bytes;
    ring->stats.packets += total_packets;
    u64_stats_update_end(&ring->stats.syncp);
}

static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
@@ -1040,8 +1057,12 @@ static long fbnic_pkt_tx(struct fbnic_napi_vector *nv,
        frag = &shinfo->frags[0];
    }

    if (fbnic_desc_unused(ring) < nsegs)
    if (fbnic_desc_unused(ring) < nsegs) {
        u64_stats_update_begin(&ring->stats.syncp);
        ring->stats.dropped++;
        u64_stats_update_end(&ring->stats.syncp);
        return -FBNIC_XDP_CONSUME;
    }

    page = virt_to_page(pkt->buff.data_hard_start);
    offset = offset_in_page(pkt->buff.data);
@@ -1181,8 +1202,8 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
                           struct fbnic_q_triad *qt, int budget)
{
    unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0;
    u64 csum_complete = 0, csum_none = 0, length_errors = 0;
    s32 head0 = -1, head1 = -1, pkt_tail = -1;
    u64 csum_complete = 0, csum_none = 0;
    struct fbnic_ring *rcq = &qt->cmpl;
    struct fbnic_pkt_buff *pkt;
    __le64 *raw_rcd, done;
@@ -1247,6 +1268,8 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
            if (!skb) {
                alloc_failed++;
                dropped++;
            } else if (skb == ERR_PTR(-FBNIC_XDP_LEN_ERR)) {
                length_errors++;
            } else {
                dropped++;
            }
@@ -1276,6 +1299,7 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
    rcq->stats.rx.alloc_failed += alloc_failed;
    rcq->stats.rx.csum_complete += csum_complete;
    rcq->stats.rx.csum_none += csum_none;
    rcq->stats.rx.length_errors += length_errors;
    u64_stats_update_end(&rcq->stats.syncp);

    if (pkt_tail >= 0)
@@ -1359,8 +1383,9 @@ void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
    fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed;
    fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete;
    fbn->rx_stats.rx.csum_none += stats->rx.csum_none;
    fbn->rx_stats.rx.length_errors += stats->rx.length_errors;
    /* Remember to add new stats here */
    BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 3);
    BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 4);
}

void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
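
The "Remember to add new stats here" reminder and the BUILD_BUG_ON above it
form a compile-time guard; a short sketch of why the constant moves from 3 to
4, with the field list taken from the fbnic_queue_stats hunk at the end of
this diff:

    /* All per-queue Rx counters are u64, so sizeof(rx) / 8 is the number
     * of counters that fbnic_aggregate_ring_rx_counters() must sum:
     *
     *     struct {
     *         u64 alloc_failed;
     *         u64 csum_complete;
     *         u64 csum_none;
     *         u64 length_errors;   (new in this patch)
     *     } rx;
     *
     * sizeof(rx) / 8 == 4, hence BUILD_BUG_ON(... != 4); forgetting to
     * aggregate a future counter now breaks the build instead of silently
     * under-reporting it.
     */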
@@ -1382,6 +1407,22 @@ void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
    BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6);
}

static void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,
                                              struct fbnic_ring *xdpr)
{
    struct fbnic_queue_stats *stats = &xdpr->stats;

    if (!(xdpr->flags & FBNIC_RING_F_STATS))
        return;

    /* Capture stats from queues before dissasociating them */
    fbn->rx_stats.bytes += stats->bytes;
    fbn->rx_stats.packets += stats->packets;
    fbn->rx_stats.dropped += stats->dropped;
    fbn->tx_stats.bytes += stats->bytes;
    fbn->tx_stats.packets += stats->packets;
}

static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
                                 struct fbnic_ring *txr)
{
@@ -1401,6 +1442,8 @@ static void fbnic_remove_xdp_ring(struct fbnic_net *fbn,
    if (!(xdpr->flags & FBNIC_RING_F_STATS))
        return;

    fbnic_aggregate_ring_xdp_counters(fbn, xdpr);

    /* Remove pointer to the Tx ring */
    WARN_ON(fbn->tx[xdpr->q_idx] && fbn->tx[xdpr->q_idx] != xdpr);
    fbn->tx[xdpr->q_idx] = NULL;
@@ -90,6 +90,7 @@ struct fbnic_queue_stats {
            u64 alloc_failed;
            u64 csum_complete;
            u64 csum_none;
            u64 length_errors;
        } rx;
    };
    u64 dropped;