Merge branch 'add-af_xdp-zero-copy-support'

Meghana Malladi says:

====================
Add AF_XDP zero copy support

This series adds AF_XDP zero-copy support to the icssg driver.

Tests were performed on AM64x-EVM with xdpsock application [1].

A clear improvement is seen in transmit (txonly) and receive (rxdrop)
for 64-byte packets. The 1500-byte test appears to be limited by line
rate (1G link), so no improvement in packet rate is seen there.

There is still an issue with l2fwd: the benchmark numbers show 0
for 64-byte packets after forwarding the first batch of packets, and
I am currently looking into it.

AF_XDP performance using 64 byte packets in Kpps.
Benchmark:	XDP-SKB		XDP-Native	XDP-Native(ZeroCopy)
rxdrop		253		473		656
txonly		350		354		855
l2fwd 		178		240		0

AF_XDP performance using 1500 byte packets in Kpps.
Benchmark:	XDP-SKB		XDP-Native	XDP-Native(ZeroCopy)
rxdrop		82		82		82
txonly		81		82		82
l2fwd 		81		82		82
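
For context on what the zero-copy mode exercises: user space opens an AF_XDP
socket, registers a UMEM, sizes the rings and binds to a specific queue with
the XDP_ZEROCOPY flag (xdpsock [1] does this via libxdp). Below is a minimal
sketch using the raw AF_XDP UAPI; the interface/queue values, frame and ring
sizes are placeholders, and the ring mmaps, fill-ring population and most
error handling are omitted:

#include <linux/if_xdp.h>
#include <net/if.h>
#include <sys/mman.h>
#include <sys/socket.h>

#define NUM_FRAMES	4096
#define FRAME_SIZE	2048

static int xsk_zc_bind_sketch(const char *ifname, unsigned int queue_id)
{
	struct xdp_umem_reg reg = { 0 };
	struct sockaddr_xdp sxdp = { 0 };
	int ring_size = 2048;
	void *umem_area;
	int fd;

	fd = socket(AF_XDP, SOCK_RAW, 0);
	if (fd < 0)
		return -1;

	/* UMEM: one contiguous buffer shared between kernel and user space */
	umem_area = mmap(NULL, (size_t)NUM_FRAMES * FRAME_SIZE,
			 PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	reg.addr = (unsigned long long)umem_area;
	reg.len = (unsigned long long)NUM_FRAMES * FRAME_SIZE;
	reg.chunk_size = FRAME_SIZE;
	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &reg, sizeof(reg));

	/* size the fill/completion and RX/TX rings before binding */
	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_size, sizeof(ring_size));
	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_size, sizeof(ring_size));
	setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_size, sizeof(ring_size));
	setsockopt(fd, SOL_XDP, XDP_TX_RING, &ring_size, sizeof(ring_size));

	/* request the driver's zero-copy path on this queue */
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = if_nametoindex(ifname);
	sxdp.sxdp_queue_id = queue_id;
	sxdp.sxdp_flags = XDP_ZEROCOPY;

	return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}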

[1]: https://github.com/xdp-project/bpf-examples/tree/master/AF_XDP-example
v5: https://lore.kernel.org/all/20251111101523.3160680-1-m-malladi@ti.com/
====================

Link: https://patch.msgid.link/20251118135542.380574-1-m-malladi@ti.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Committed by Paolo Abeni on 2025-11-20 15:24:13 +01:00.
3 changed files with 738 additions and 148 deletions


@@ -93,15 +93,91 @@ void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
}
EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);
static int emac_xsk_xmit_zc(struct prueth_emac *emac,
unsigned int q_idx)
{
struct prueth_tx_chn *tx_chn = &emac->tx_chns[q_idx];
struct xsk_buff_pool *pool = tx_chn->xsk_pool;
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *host_desc;
dma_addr_t dma_desc, dma_buf;
struct prueth_swdata *swdata;
struct xdp_desc xdp_desc;
int num_tx = 0, pkt_len;
int descs_avail, ret;
u32 *epib;
int i;
descs_avail = k3_cppi_desc_pool_avail(tx_chn->desc_pool);
/* Ensure that the TX ring is not filled up by XDP: keep MAX_SKB_FRAGS
* descriptors available for the normal TX path, which stops the queue
* itself if necessary.
*/
if (descs_avail <= MAX_SKB_FRAGS)
return 0;
descs_avail -= MAX_SKB_FRAGS;
for (i = 0; i < descs_avail; i++) {
if (!xsk_tx_peek_desc(pool, &xdp_desc))
break;
dma_buf = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
pkt_len = xdp_desc.len;
xsk_buff_raw_dma_sync_for_device(pool, dma_buf, pkt_len);
host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
if (unlikely(!host_desc))
break;
cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
PRUETH_NAV_PS_DATA_SIZE);
cppi5_hdesc_set_pkttype(host_desc, 0);
epib = host_desc->epib;
epib[0] = 0;
epib[1] = 0;
cppi5_hdesc_set_pktlen(host_desc, pkt_len);
cppi5_desc_set_tags_ids(&host_desc->hdr, 0,
(emac->port_id | (q_idx << 8)));
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf,
pkt_len);
swdata = cppi5_hdesc_get_swdata(host_desc);
swdata->type = PRUETH_SWDATA_XSK;
dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
host_desc);
ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn,
host_desc, dma_desc);
if (ret) {
ndev->stats.tx_errors++;
k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
break;
}
num_tx++;
}
xsk_tx_release(tx_chn->xsk_pool);
return num_tx;
}
void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
struct cppi5_host_desc_t *desc)
{
struct cppi5_host_desc_t *first_desc, *next_desc;
dma_addr_t buf_dma, next_desc_dma;
struct prueth_swdata *swdata;
u32 buf_dma_len;
first_desc = desc;
next_desc = first_desc;
swdata = cppi5_hdesc_get_swdata(first_desc);
if (swdata->type == PRUETH_SWDATA_XSK)
goto free_pool;
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
@@ -126,6 +202,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
free_pool:
k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);
@@ -139,7 +216,9 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
unsigned int total_bytes = 0;
int xsk_frames_done = 0;
struct xdp_frame *xdpf;
unsigned int pkt_len;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -176,6 +255,11 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
total_bytes += xdpf->len;
xdp_return_frame(xdpf);
break;
case PRUETH_SWDATA_XSK:
pkt_len = cppi5_hdesc_get_pktlen(desc_tx);
dev_sw_netstats_tx_add(ndev, 1, pkt_len);
xsk_frames_done++;
break;
default:
prueth_xmit_free(tx_chn, desc_tx);
ndev->stats.tx_dropped++;
@@ -204,6 +288,18 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
__netif_tx_unlock(netif_txq);
}
if (tx_chn->xsk_pool) {
if (xsk_frames_done)
xsk_tx_completed(tx_chn->xsk_pool, xsk_frames_done);
if (xsk_uses_need_wakeup(tx_chn->xsk_pool))
xsk_set_tx_need_wakeup(tx_chn->xsk_pool);
netif_txq = netdev_get_tx_queue(ndev, chn);
txq_trans_cond_update(netif_txq);
emac_xsk_xmit_zc(emac, chn);
}
return num_tx;
}
@@ -212,7 +308,10 @@ static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
struct prueth_tx_chn *tx_chns =
container_of(timer, struct prueth_tx_chn, tx_hrtimer);
enable_irq(tx_chns->irq);
if (tx_chns->irq_disabled) {
tx_chns->irq_disabled = false;
enable_irq(tx_chns->irq);
}
return HRTIMER_NORESTART;
}
@@ -235,7 +334,10 @@ static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
ns_to_ktime(tx_chn->tx_pace_timeout_ns),
HRTIMER_MODE_REL_PINNED);
} else {
enable_irq(tx_chn->irq);
if (tx_chn->irq_disabled) {
tx_chn->irq_disabled = false;
enable_irq(tx_chn->irq);
}
}
}
@@ -246,6 +348,7 @@ static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
{
struct prueth_tx_chn *tx_chn = dev_id;
tx_chn->irq_disabled = true;
disable_irq_nosync(irq);
napi_schedule(&tx_chn->napi_tx);
@@ -362,6 +465,29 @@ fail:
}
EXPORT_SYMBOL_GPL(prueth_init_tx_chns);
static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
struct device *dma_dev,
int size)
{
struct page_pool_params pp_params = { 0 };
struct page_pool *pool;
pp_params.order = 0;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = size;
pp_params.nid = dev_to_node(emac->prueth->dev);
pp_params.dma_dir = DMA_BIDIRECTIONAL;
pp_params.dev = dma_dev;
pp_params.napi = &emac->napi_rx;
pp_params.max_len = PAGE_SIZE;
pool = page_pool_create(&pp_params);
if (IS_ERR(pool))
netdev_err(emac->ndev, "cannot create rx page pool\n");
return pool;
}
int prueth_init_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
char *name, u32 max_rflows,
@@ -371,6 +497,7 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
struct device *dev = emac->prueth->dev;
struct net_device *ndev = emac->ndev;
u32 fdqring_id, hdesc_size;
struct page_pool *pool;
int i, ret = 0, slice;
int flow_id_base;
@@ -413,6 +540,14 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
goto fail;
}
pool = prueth_create_page_pool(emac, rx_chn->dma_dev, rx_chn->descs_num);
if (IS_ERR(pool)) {
ret = PTR_ERR(pool);
goto fail;
}
rx_chn->pg_pool = pool;
flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
emac->rx_mgm_flow_id_base = flow_id_base;
@@ -544,15 +679,15 @@ void emac_rx_timestamp(struct prueth_emac *emac,
* emac_xmit_xdp_frame - transmits an XDP frame
* @emac: emac device
* @xdpf: data to transmit
* @page: page from page pool if already DMA mapped
* @q_idx: queue id
* @buff_type: Type of buffer to be transmitted
*
* Return: XDP state
*/
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
struct xdp_frame *xdpf,
struct page *page,
unsigned int q_idx)
unsigned int q_idx,
enum prueth_tx_buff_type buff_type)
{
struct cppi5_host_desc_t *first_desc;
struct net_device *ndev = emac->ndev;
@@ -560,6 +695,7 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
struct prueth_swdata *swdata;
struct page *page;
u32 *epib;
int ret;
@@ -576,7 +712,12 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
return ICSSG_XDP_CONSUMED; /* drop */
}
if (page) { /* already DMA mapped by page_pool */
if (buff_type == PRUETH_TX_BUFF_TYPE_XDP_TX) { /* already DMA mapped by page_pool */
page = virt_to_head_page(xdpf->data);
if (unlikely(!page)) {
netdev_err(ndev, "xdp tx: failed to get page from xdpf\n");
goto drop_free_descs;
}
buf_dma = page_pool_get_dma_addr(page);
buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
} else { /* Map the linear buffer */
@@ -631,13 +772,11 @@ EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);
* emac_run_xdp - run an XDP program
* @emac: emac device
* @xdp: XDP buffer containing the frame
* @page: page with RX data if already DMA mapped
* @len: Rx descriptor packet length
*
* Return: XDP state
*/
static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
struct page *page, u32 *len)
static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *len)
{
struct net_device *ndev = emac->ndev;
struct netdev_queue *netif_txq;
@@ -664,7 +803,8 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
q_idx = cpu % emac->tx_ch_num;
netif_txq = netdev_get_tx_queue(ndev, q_idx);
__netif_tx_lock(netif_txq, cpu);
result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
result = emac_xmit_xdp_frame(emac, xdpf, q_idx,
PRUETH_TX_BUFF_TYPE_XDP_TX);
__netif_tx_unlock(netif_txq);
if (result == ICSSG_XDP_CONSUMED) {
ndev->stats.tx_dropped++;
@@ -689,11 +829,188 @@ drop:
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
ndev->stats.rx_dropped++;
page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
return ICSSG_XDP_CONSUMED;
}
}
static int prueth_dma_rx_push_mapped_zc(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
struct xdp_buff *xdp)
{
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
struct prueth_swdata *swdata;
dma_addr_t desc_dma;
dma_addr_t buf_dma;
int buf_len;
buf_dma = xsk_buff_xdp_get_dma(xdp);
desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
if (!desc_rx) {
netdev_err(ndev, "rx push: failed to allocate descriptor\n");
return -ENOMEM;
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
PRUETH_NAV_PS_DATA_SIZE);
k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
buf_len = xsk_pool_get_rx_frame_size(rx_chn->xsk_pool);
cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
swdata = cppi5_hdesc_get_swdata(desc_rx);
swdata->type = PRUETH_SWDATA_XSK;
swdata->data.xdp = xdp;
return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
desc_rx, desc_dma);
}
static int prueth_rx_alloc_zc(struct prueth_emac *emac, int budget)
{
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
struct xdp_buff *xdp;
int i, ret;
for (i = 0; i < budget; i++) {
xdp = xsk_buff_alloc(rx_chn->xsk_pool);
if (!xdp)
break;
ret = prueth_dma_rx_push_mapped_zc(emac, rx_chn, xdp);
if (ret) {
netdev_err(emac->ndev, "rx alloc: failed to map descriptors to xdp buff\n");
xsk_buff_free(xdp);
break;
}
}
return i;
}
static void emac_dispatch_skb_zc(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *psdata)
{
unsigned int headroom = xdp->data - xdp->data_hard_start;
unsigned int pkt_len = xdp->data_end - xdp->data;
struct net_device *ndev = emac->ndev;
struct sk_buff *skb;
skb = napi_alloc_skb(&emac->napi_rx, xdp->data_end - xdp->data_hard_start);
if (unlikely(!skb)) {
ndev->stats.rx_dropped++;
return;
}
skb_reserve(skb, headroom);
skb_put(skb, pkt_len);
skb->dev = ndev;
/* RX HW timestamp */
if (emac->rx_ts_enabled)
emac_rx_timestamp(emac, skb, psdata);
if (emac->prueth->is_switch_mode)
skb->offload_fwd_mark = emac->offload_fwd_mark;
skb->protocol = eth_type_trans(skb, ndev);
skb_mark_for_recycle(skb);
napi_gro_receive(&emac->napi_rx, skb);
ndev->stats.rx_bytes += pkt_len;
ndev->stats.rx_packets++;
}
static int emac_rx_packet_zc(struct prueth_emac *emac, u32 flow_id,
int budget)
{
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0;
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
struct prueth_swdata *swdata;
dma_addr_t desc_dma, buf_dma;
struct xdp_buff *xdp;
int xdp_status = 0;
int count = 0;
u32 *psdata;
int ret;
while (count < budget) {
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
if (ret) {
if (ret != -ENODATA)
netdev_err(ndev, "rx pop: failed: %d\n", ret);
break;
}
if (cppi5_desc_is_tdcm(desc_dma)) {
complete(&emac->tdown_complete);
break;
}
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
if (swdata->type != PRUETH_SWDATA_XSK) {
netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
break;
}
xdp = swdata->data.xdp;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
/* firmware adds 4 CRC bytes, strip them */
pkt_len -= 4;
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
psdata = cppi5_hdesc_get_psdata(desc_rx);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
count++;
xsk_buff_set_size(xdp, pkt_len);
xsk_buff_dma_sync_for_cpu(xdp);
if (prueth_xdp_is_enabled(emac)) {
ret = emac_run_xdp(emac, xdp, &pkt_len);
switch (ret) {
case ICSSG_XDP_PASS:
/* prepare skb and send to n/w stack */
emac_dispatch_skb_zc(emac, xdp, psdata);
xsk_buff_free(xdp);
break;
case ICSSG_XDP_CONSUMED:
xsk_buff_free(xdp);
break;
case ICSSG_XDP_TX:
case ICSSG_XDP_REDIR:
xdp_status |= ret;
break;
}
} else {
/* prepare skb and send to n/w stack */
emac_dispatch_skb_zc(emac, xdp, psdata);
xsk_buff_free(xdp);
}
}
if (xdp_status & ICSSG_XDP_REDIR)
xdp_do_flush();
/* Allocate xsk buffers from the pool for the "count" number of
* packets processed in order to be able to receive more packets.
*/
ret = prueth_rx_alloc_zc(emac, count);
if (xsk_uses_need_wakeup(rx_chn->xsk_pool)) {
/* If the user space doesn't provide enough buffers then it must
* explicitly wake up the kernel when new buffers are available
*/
if (ret < count)
xsk_set_rx_need_wakeup(rx_chn->xsk_pool);
else
xsk_clear_rx_need_wakeup(rx_chn->xsk_pool);
}
return count;
}
static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
{
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
@@ -719,8 +1036,10 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
return ret;
}
if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
if (cppi5_desc_is_tdcm(desc_dma)) {
complete(&emac->tdown_complete);
return 0;
}
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
@@ -738,7 +1057,6 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
/* firmware adds 4 CRC bytes, strip them */
pkt_len -= 4;
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
/* if allocation fails we drop the packet but push the
@@ -752,11 +1070,11 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
}
pa = page_address(page);
if (emac->xdp_prog) {
if (prueth_xdp_is_enabled(emac)) {
xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
*xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
*xdp_state = emac_run_xdp(emac, &xdp, &pkt_len);
if (*xdp_state != ICSSG_XDP_PASS)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
@@ -804,24 +1122,29 @@ requeue:
return ret;
}
static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
struct prueth_swdata *swdata;
struct page_pool *pool;
struct xdp_buff *xdp;
struct page *page;
pool = rx_chn->pg_pool;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
if (swdata->type == PRUETH_SWDATA_PAGE) {
if (rx_chn->xsk_pool) {
xdp = swdata->data.xdp;
xsk_buff_free(xdp);
} else {
page = swdata->data.page;
page_pool_recycle_direct(pool, page);
}
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
}
EXPORT_SYMBOL_GPL(prueth_rx_cleanup);
static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
{
@@ -1025,10 +1348,11 @@ drop_stop_q_busy:
}
EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);
static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_tx_chn *tx_chn = data;
struct cppi5_host_desc_t *desc_tx;
struct xsk_buff_pool *xsk_pool;
struct prueth_swdata *swdata;
struct xdp_frame *xdpf;
struct sk_buff *skb;
@@ -1045,17 +1369,23 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
xdpf = swdata->data.xdpf;
xdp_return_frame(xdpf);
break;
case PRUETH_SWDATA_XSK:
xsk_pool = tx_chn->xsk_pool;
xsk_tx_completed(xsk_pool, 1);
break;
default:
break;
}
prueth_xmit_free(tx_chn, desc_tx);
}
EXPORT_SYMBOL_GPL(prueth_tx_cleanup);
irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
emac->rx_chns.irq_disabled = true;
disable_irq_nosync(irq);
napi_schedule(&emac->napi_rx);
@@ -1083,6 +1413,7 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
int flow = emac->is_sr1 ?
PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
int xdp_state_or = 0;
int num_rx = 0;
int cur_budget;
@@ -1090,14 +1421,18 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
int ret;
while (flow--) {
cur_budget = budget - num_rx;
if (rx_chn->xsk_pool) {
num_rx = emac_rx_packet_zc(emac, flow, budget);
} else {
cur_budget = budget - num_rx;
while (cur_budget--) {
ret = emac_rx_packet(emac, flow, &xdp_state);
xdp_state_or |= xdp_state;
if (ret)
break;
num_rx++;
while (cur_budget--) {
ret = emac_rx_packet(emac, flow, &xdp_state);
xdp_state_or |= xdp_state;
if (ret)
break;
num_rx++;
}
}
if (num_rx >= budget)
@@ -1113,7 +1448,11 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
ns_to_ktime(emac->rx_pace_timeout_ns),
HRTIMER_MODE_REL_PINNED);
} else {
enable_irq(emac->rx_chns.irq[rx_flow]);
if (emac->rx_chns.irq_disabled) {
/* re-enable the RX IRQ */
emac->rx_chns.irq_disabled = false;
enable_irq(emac->rx_chns.irq[rx_flow]);
}
}
}
@@ -1121,62 +1460,48 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
}
EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
struct device *dma_dev,
int size)
{
struct page_pool_params pp_params = { 0 };
struct page_pool *pool;
pp_params.order = 0;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = size;
pp_params.nid = dev_to_node(emac->prueth->dev);
pp_params.dma_dir = DMA_BIDIRECTIONAL;
pp_params.dev = dma_dev;
pp_params.napi = &emac->napi_rx;
pp_params.max_len = PAGE_SIZE;
pool = page_pool_create(&pp_params);
if (IS_ERR(pool))
netdev_err(emac->ndev, "cannot create rx page pool\n");
return pool;
}
int prueth_prepare_rx_chan(struct prueth_emac *emac,
struct prueth_rx_chn *chn,
int buf_size)
{
struct page_pool *pool;
struct page *page;
int desc_avail;
int i, ret;
pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
if (IS_ERR(pool))
return PTR_ERR(pool);
desc_avail = k3_cppi_desc_pool_avail(chn->desc_pool);
if (desc_avail < chn->descs_num)
netdev_warn(emac->ndev,
"not enough RX descriptors available %d < %d\n",
desc_avail, chn->descs_num);
chn->pg_pool = pool;
for (i = 0; i < chn->descs_num; i++) {
/* NOTE: we're not using memory efficiently here.
* 1 full page (4KB?) used here instead of
* PRUETH_MAX_PKT_SIZE (~1.5KB?)
if (chn->xsk_pool) {
/* get pages from xsk_pool and push to RX ring
* queue as much as possible
*/
page = page_pool_dev_alloc_pages(pool);
if (!page) {
netdev_err(emac->ndev, "couldn't allocate rx page\n");
ret = -ENOMEM;
ret = prueth_rx_alloc_zc(emac, desc_avail);
if (!ret)
goto recycle_alloc_pg;
}
} else {
for (i = 0; i < desc_avail; i++) {
/* NOTE: we're not using memory efficiently here.
* 1 full page (4KB?) used here instead of
* PRUETH_MAX_PKT_SIZE (~1.5KB?)
*/
page = page_pool_dev_alloc_pages(chn->pg_pool);
if (!page) {
netdev_err(emac->ndev, "couldn't allocate rx page\n");
ret = -ENOMEM;
goto recycle_alloc_pg;
}
ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
if (ret < 0) {
netdev_err(emac->ndev,
"cannot submit page for rx chan %s ret %d\n",
chn->name, ret);
page_pool_recycle_direct(pool, page);
goto recycle_alloc_pg;
ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
if (ret < 0) {
netdev_err(emac->ndev,
"cannot submit page for rx chan %s ret %d\n",
chn->name, ret);
page_pool_recycle_direct(chn->pg_pool, page);
goto recycle_alloc_pg;
}
}
}


@@ -47,6 +47,9 @@
NETIF_F_HW_HSR_TAG_INS | \
NETIF_F_HW_HSR_TAG_RM)
#define PRUETH_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC |\
DMA_ATTR_WEAK_ORDERING)
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
@@ -392,7 +395,11 @@ static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
container_of(timer, struct prueth_emac, rx_hrtimer);
int rx_flow = PRUETH_RX_FLOW_DATA;
enable_irq(emac->rx_chns.irq[rx_flow]);
if (emac->rx_chns.irq_disabled) {
/* re-enable the RX IRQ */
emac->rx_chns.irq_disabled = false;
enable_irq(emac->rx_chns.irq[rx_flow]);
}
return HRTIMER_NORESTART;
}
@@ -566,33 +573,43 @@ const struct icss_iep_clockops prueth_iep_clockops = {
.perout_enable = prueth_perout_enable,
};
static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
{
struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
if (xdp_rxq_info_is_reg(rxq))
xdp_rxq_info_unreg(rxq);
}
static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
{
struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
struct page_pool *pool = emac->rx_chns.pg_pool;
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
int ret;
ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
if (ret)
return ret;
ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
if (ret)
xdp_rxq_info_unreg(rxq);
if (rx_chn->xsk_pool) {
ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
if (ret)
goto xdp_unreg;
xsk_pool_set_rxq_info(rx_chn->xsk_pool, rxq);
} else {
ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
if (ret)
goto xdp_unreg;
}
return 0;
xdp_unreg:
prueth_destroy_xdp_rxqs(emac);
return ret;
}
static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
{
struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
if (!xdp_rxq_info_is_reg(rxq))
return;
xdp_rxq_info_unreg(rxq);
}
static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
struct net_device *real_dev;
@@ -735,6 +752,128 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
return 0;
}
static void prueth_set_xsk_pool(struct prueth_emac *emac, u16 queue_id)
{
struct prueth_tx_chn *tx_chn = &emac->tx_chns[queue_id];
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
if (emac->xsk_qid != queue_id) {
rx_chn->xsk_pool = NULL;
tx_chn->xsk_pool = NULL;
} else {
rx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
tx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
}
}
static void prueth_destroy_txq(struct prueth_emac *emac)
{
int ret, i;
atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
/* ensure new tdown_cnt value is visible */
smp_mb__after_atomic();
/* tear down and disable UDMA channels */
reinit_completion(&emac->tdown_complete);
for (i = 0; i < emac->tx_ch_num; i++)
k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
ret = wait_for_completion_timeout(&emac->tdown_complete,
msecs_to_jiffies(1000));
if (!ret)
netdev_err(emac->ndev, "tx teardown timeout\n");
for (i = 0; i < emac->tx_ch_num; i++) {
napi_disable(&emac->tx_chns[i].napi_tx);
hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
&emac->tx_chns[i],
prueth_tx_cleanup);
k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
}
}
static void prueth_destroy_rxq(struct prueth_emac *emac)
{
int i, ret;
/* tear down and disable UDMA channels */
reinit_completion(&emac->tdown_complete);
k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
/* When RX DMA Channel Teardown is initiated, it will result in an
* interrupt and a Teardown Completion Marker (TDCM) is queued into
* the RX Completion queue. Acknowledging the interrupt involves
* popping the TDCM descriptor from the RX Completion queue via the
* RX NAPI Handler. To avoid timing out when waiting for the TDCM to
* be popped, schedule the RX NAPI handler to run immediately.
*/
if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
if (napi_schedule_prep(&emac->napi_rx))
__napi_schedule(&emac->napi_rx);
}
ret = wait_for_completion_timeout(&emac->tdown_complete,
msecs_to_jiffies(1000));
if (!ret)
netdev_err(emac->ndev, "rx teardown timeout\n");
for (i = 0; i < PRUETH_MAX_RX_FLOWS; i++) {
napi_disable(&emac->napi_rx);
hrtimer_cancel(&emac->rx_hrtimer);
k3_udma_glue_reset_rx_chn(emac->rx_chns.rx_chn, i,
&emac->rx_chns,
prueth_rx_cleanup);
}
prueth_destroy_xdp_rxqs(emac);
k3_udma_glue_disable_rx_chn(emac->rx_chns.rx_chn);
}
static int prueth_create_txq(struct prueth_emac *emac)
{
int ret, i;
for (i = 0; i < emac->tx_ch_num; i++) {
ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
if (ret)
goto reset_tx_chan;
napi_enable(&emac->tx_chns[i].napi_tx);
}
return 0;
reset_tx_chan:
/* Since the interface is not yet up, there wouldn't be
* any SKB for completion, so pass free_skb as false.
*/
prueth_reset_tx_chan(emac, i, false);
return ret;
}
static int prueth_create_rxq(struct prueth_emac *emac)
{
int ret;
ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
if (ret)
return ret;
ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
if (ret)
goto reset_rx_chn;
ret = prueth_create_xdp_rxqs(emac);
if (ret)
goto reset_rx_chn;
napi_enable(&emac->napi_rx);
return 0;
reset_rx_chn:
prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
return ret;
}
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
@@ -746,7 +885,7 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
static int emac_ndo_open(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
int ret, i, num_data_chn = emac->tx_ch_num;
int ret, num_data_chn = emac->tx_ch_num;
struct icssg_flow_cfg __iomem *flow_cfg;
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
@@ -767,6 +906,7 @@ static int emac_ndo_open(struct net_device *ndev)
return ret;
}
emac->xsk_qid = -EINVAL;
init_completion(&emac->cmd_complete);
ret = prueth_init_tx_chns(emac);
if (ret) {
@@ -819,28 +959,13 @@ static int emac_ndo_open(struct net_device *ndev)
goto stop;
/* Prepare RX */
ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
ret = prueth_create_rxq(emac);
if (ret)
goto free_tx_ts_irq;
ret = prueth_create_xdp_rxqs(emac);
ret = prueth_create_txq(emac);
if (ret)
goto reset_rx_chn;
ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
if (ret)
goto destroy_xdp_rxqs;
for (i = 0; i < emac->tx_ch_num; i++) {
ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
if (ret)
goto reset_tx_chan;
}
/* Enable NAPI in Tx and Rx direction */
for (i = 0; i < emac->tx_ch_num; i++)
napi_enable(&emac->tx_chns[i].napi_tx);
napi_enable(&emac->napi_rx);
goto destroy_rxq;
/* start PHY */
phy_start(ndev->phydev);
@@ -851,15 +976,8 @@ static int emac_ndo_open(struct net_device *ndev)
return 0;
reset_tx_chan:
/* Since the interface is not yet up, there wouldn't be
* any SKB for completion, so pass free_skb as false.
*/
prueth_reset_tx_chan(emac, i, false);
destroy_xdp_rxqs:
prueth_destroy_xdp_rxqs(emac);
reset_rx_chn:
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
destroy_rxq:
prueth_destroy_rxq(emac);
free_tx_ts_irq:
free_irq(emac->tx_ts_irq, emac);
stop:
@@ -889,9 +1007,6 @@ static int emac_ndo_stop(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
int rx_flow = PRUETH_RX_FLOW_DATA;
int max_rx_flows;
int ret, i;
/* inform the upper layers. */
netif_tx_stop_all_queues(ndev);
@@ -905,32 +1020,8 @@ static int emac_ndo_stop(struct net_device *ndev)
else
__dev_mc_unsync(ndev, icssg_prueth_del_mcast);
atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
/* ensure new tdown_cnt value is visible */
smp_mb__after_atomic();
/* tear down and disable UDMA channels */
reinit_completion(&emac->tdown_complete);
for (i = 0; i < emac->tx_ch_num; i++)
k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
ret = wait_for_completion_timeout(&emac->tdown_complete,
msecs_to_jiffies(1000));
if (!ret)
netdev_err(ndev, "tx teardown timeout\n");
prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
for (i = 0; i < emac->tx_ch_num; i++) {
napi_disable(&emac->tx_chns[i].napi_tx);
hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
}
max_rx_flows = PRUETH_MAX_RX_FLOWS;
k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
prueth_destroy_xdp_rxqs(emac);
napi_disable(&emac->napi_rx);
hrtimer_cancel(&emac->rx_hrtimer);
prueth_destroy_txq(emac);
prueth_destroy_rxq(emac);
cancel_work_sync(&emac->rx_mode_work);
@@ -943,10 +1034,10 @@ static int emac_ndo_stop(struct net_device *ndev)
free_irq(emac->tx_ts_irq, emac);
free_irq(emac->rx_chns.irq[rx_flow], emac);
free_irq(emac->rx_chns.irq[PRUETH_RX_FLOW_DATA], emac);
prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
prueth_cleanup_rx_chns(emac, &emac->rx_chns, PRUETH_MAX_RX_FLOWS);
prueth_cleanup_tx_chns(emac);
prueth->emacs_initialized--;
@@ -1108,7 +1199,8 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame
__netif_tx_lock(netif_txq, cpu);
for (i = 0; i < n; i++) {
xdpf = frames[i];
err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
err = emac_xmit_xdp_frame(emac, xdpf, q_idx,
PRUETH_TX_BUFF_TYPE_XDP_NDO);
if (err != ICSSG_XDP_TX) {
ndev->stats.tx_dropped++;
break;
@@ -1141,6 +1233,109 @@ static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
return 0;
}
static int prueth_xsk_pool_enable(struct prueth_emac *emac,
struct xsk_buff_pool *pool, u16 queue_id)
{
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
u32 frame_size;
int ret;
if (queue_id >= PRUETH_MAX_RX_FLOWS ||
queue_id >= emac->tx_ch_num) {
netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
return -EINVAL;
}
frame_size = xsk_pool_get_rx_frame_size(pool);
if (frame_size < PRUETH_MAX_PKT_SIZE)
return -EOPNOTSUPP;
ret = xsk_pool_dma_map(pool, rx_chn->dma_dev, PRUETH_RX_DMA_ATTR);
if (ret) {
netdev_err(emac->ndev, "Failed to map XSK pool: %d\n", ret);
return ret;
}
if (netif_running(emac->ndev)) {
/* stop packets from wire for graceful teardown */
ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
if (ret)
return ret;
prueth_destroy_rxq(emac);
}
emac->xsk_qid = queue_id;
prueth_set_xsk_pool(emac, queue_id);
if (netif_running(emac->ndev)) {
ret = prueth_create_rxq(emac);
if (ret) {
netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
return ret;
}
ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
if (ret) {
prueth_destroy_rxq(emac);
return ret;
}
ret = prueth_xsk_wakeup(emac->ndev, queue_id, XDP_WAKEUP_RX);
if (ret)
return ret;
}
return 0;
}
static int prueth_xsk_pool_disable(struct prueth_emac *emac, u16 queue_id)
{
struct xsk_buff_pool *pool;
int ret;
if (queue_id >= PRUETH_MAX_RX_FLOWS ||
queue_id >= emac->tx_ch_num) {
netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
return -EINVAL;
}
if (emac->xsk_qid != queue_id) {
netdev_err(emac->ndev, "XSK queue ID %d not registered\n", queue_id);
return -EINVAL;
}
pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
if (!pool) {
netdev_err(emac->ndev, "No XSK pool registered for queue %d\n", queue_id);
return -EINVAL;
}
if (netif_running(emac->ndev)) {
/* stop packets from wire for graceful teardown */
ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
if (ret)
return ret;
prueth_destroy_rxq(emac);
}
xsk_pool_dma_unmap(pool, PRUETH_RX_DMA_ATTR);
emac->xsk_qid = -EINVAL;
prueth_set_xsk_pool(emac, queue_id);
if (netif_running(emac->ndev)) {
ret = prueth_create_rxq(emac);
if (ret) {
netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
return ret;
}
ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
if (ret) {
prueth_destroy_rxq(emac);
return ret;
}
}
return 0;
}
/**
* emac_ndo_bpf - implements ndo_bpf for icssg_prueth
* @ndev: network adapter device
@@ -1155,11 +1350,58 @@ static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
switch (bpf->command) {
case XDP_SETUP_PROG:
return emac_xdp_setup(emac, bpf);
case XDP_SETUP_XSK_POOL:
return bpf->xsk.pool ?
prueth_xsk_pool_enable(emac, bpf->xsk.pool, bpf->xsk.queue_id) :
prueth_xsk_pool_disable(emac, bpf->xsk.queue_id);
default:
return -EINVAL;
}
}
int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags)
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth_tx_chn *tx_chn = &emac->tx_chns[qid];
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
if (emac->xsk_qid != qid) {
netdev_err(ndev, "XSK queue %d not registered\n", qid);
return -EINVAL;
}
if (qid >= PRUETH_MAX_RX_FLOWS || qid >= emac->tx_ch_num) {
netdev_err(ndev, "Invalid XSK queue ID %d\n", qid);
return -EINVAL;
}
if (!tx_chn->xsk_pool) {
netdev_err(ndev, "XSK pool not registered for queue %d\n", qid);
return -EINVAL;
}
if (!rx_chn->xsk_pool) {
netdev_err(ndev, "XSK pool not registered for RX queue %d\n", qid);
return -EINVAL;
}
if (flags & XDP_WAKEUP_TX) {
if (!napi_if_scheduled_mark_missed(&tx_chn->napi_tx)) {
if (likely(napi_schedule_prep(&tx_chn->napi_tx)))
__napi_schedule(&tx_chn->napi_tx);
}
}
if (flags & XDP_WAKEUP_RX) {
if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
if (likely(napi_schedule_prep(&emac->napi_rx)))
__napi_schedule(&emac->napi_rx);
}
}
return 0;
}
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -1178,6 +1420,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_xdp_xmit = emac_xdp_xmit,
.ndo_hwtstamp_get = icssg_ndo_get_ts_config,
.ndo_hwtstamp_set = icssg_ndo_set_ts_config,
.ndo_xsk_wakeup = prueth_xsk_wakeup,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -1311,7 +1554,8 @@ static int prueth_netdev_init(struct prueth *prueth,
xdp_set_features_flag(ndev,
NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT);
NETDEV_XDP_ACT_NDO_XMIT |
NETDEV_XDP_ACT_XSK_ZEROCOPY);
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
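
The prueth_xsk_wakeup() hook added here, together with the
xsk_set_rx_need_wakeup()/xsk_set_tx_need_wakeup() calls in the completion
paths, forms the kernel half of the need_wakeup protocol; the user-space half
only issues a syscall when the kernel requests it. A minimal sketch, assuming
libxdp's xsk ring helpers (<xdp/xsk.h>); the helper names below are
illustrative and not part of this series:

#include <poll.h>
#include <sys/socket.h>
#include <xdp/xsk.h>	/* libxdp ring helpers (assumption about the toolchain) */

/* TX: kick the driver only when it set XDP_RING_NEED_WAKEUP on the TX ring */
static void kick_tx_if_needed(int xsk_fd, struct xsk_ring_prod *tx)
{
	if (xsk_ring_prod__needs_wakeup(tx))
		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
}

/* RX: the flag is carried on the fill ring; poll wakes the driver to refill */
static void wait_rx_if_needed(int xsk_fd, struct xsk_ring_prod *fill)
{
	struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN };

	if (xsk_ring_prod__needs_wakeup(fill))
		poll(&pfd, 1, 1000);
}

Either syscall lands in prueth_xsk_wakeup(), which schedules the TX or RX
NAPI so the driver can drain the TX ring via emac_xsk_xmit_zc() or refill RX
descriptors via prueth_rx_alloc_zc().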


@@ -38,6 +38,8 @@
#include <net/devlink.h>
#include <net/xdp.h>
#include <net/page_pool/helpers.h>
#include <net/xsk_buff_pool.h>
#include <net/xdp_sock_drv.h>
#include "icssg_config.h"
#include "icss_iep.h"
@@ -126,6 +128,8 @@ struct prueth_tx_chn {
char name[32];
struct hrtimer tx_hrtimer;
unsigned long tx_pace_timeout_ns;
struct xsk_buff_pool *xsk_pool;
bool irq_disabled;
};
struct prueth_rx_chn {
@@ -138,6 +142,8 @@ struct prueth_rx_chn {
char name[32];
struct page_pool *pg_pool;
struct xdp_rxq_info xdp_rxq;
struct xsk_buff_pool *xsk_pool;
bool irq_disabled;
};
enum prueth_swdata_type {
@@ -146,6 +152,12 @@ enum prueth_swdata_type {
PRUETH_SWDATA_PAGE,
PRUETH_SWDATA_CMD,
PRUETH_SWDATA_XDPF,
PRUETH_SWDATA_XSK,
};
enum prueth_tx_buff_type {
PRUETH_TX_BUFF_TYPE_XDP_TX,
PRUETH_TX_BUFF_TYPE_XDP_NDO,
};
struct prueth_swdata {
@@ -155,6 +167,7 @@ struct prueth_swdata {
struct page *page;
u32 cmd;
struct xdp_frame *xdpf;
struct xdp_buff *xdp;
} data;
};
@@ -241,6 +254,7 @@ struct prueth_emac {
struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
struct bpf_prog *xdp_prog;
struct xdp_attachment_info xdpi;
int xsk_qid;
};
/* The buf includes headroom compatible with both skb and xdpf */
@@ -499,7 +513,14 @@ void prueth_put_cores(struct prueth *prueth, int slice);
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns);
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
struct xdp_frame *xdpf,
struct page *page,
unsigned int q_idx);
unsigned int q_idx,
enum prueth_tx_buff_type buff_type);
void prueth_rx_cleanup(void *data, dma_addr_t desc_dma);
void prueth_tx_cleanup(void *data, dma_addr_t desc_dma);
int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags);
static inline bool prueth_xdp_is_enabled(struct prueth_emac *emac)
{
return !!READ_ONCE(emac->xdp_prog);
}
#endif /* __NET_TI_ICSSG_PRUETH_H */