Merge branch 'net-fec-do-some-cleanup-for-the-driver'

Wei Fang says:

====================
net: fec: do some cleanup for the driver

This patch set removes some unnecessary or invalid code from the FEC
driver. See each patch for details.
====================

Link: https://patch.msgid.link/20251119025148.2817602-1-wei.fang@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski on 2025-11-20 18:40:15 -08:00
2 changed files with 32 additions and 68 deletions

drivers/net/ethernet/freescale/fec.h

@@ -24,9 +24,7 @@
#include <linux/timecounter.h>
#include <net/xdp.h>
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
-defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -242,23 +240,6 @@ struct bufdesc_ex {
__fec16 res0[4];
};
-/*
-* The following definitions courtesy of commproc.h, which where
-* Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
-*/
-#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
-#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
-#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
-#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
-#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
-#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
-#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
-#define BD_SC_BR ((ushort)0x0020) /* Break received */
-#define BD_SC_FR ((ushort)0x0010) /* Framing error */
-#define BD_SC_PR ((ushort)0x0008) /* Parity error */
-#define BD_SC_OV ((ushort)0x0002) /* Overrun */
-#define BD_SC_CD ((ushort)0x0001) /* ?? */
/* Buffer descriptor control/status used by Ethernet receive.
*/
#define BD_ENET_RX_EMPTY ((ushort)0x8000)
@@ -530,12 +511,6 @@ struct bufdesc_prop {
unsigned char dsize_log2;
};
-struct fec_enet_priv_txrx_info {
-int offset;
-struct page *page;
-struct sk_buff *skb;
-};
enum {
RX_XDP_REDIRECT = 0,
RX_XDP_PASS,
@@ -575,7 +550,7 @@ struct fec_enet_priv_tx_q {
struct fec_enet_priv_rx_q {
struct bufdesc_prop bd;
-struct fec_enet_priv_txrx_info rx_skb_info[RX_RING_SIZE];
+struct page *rx_buf[RX_RING_SIZE];
/* page_pool */
struct page_pool *page_pool;
@@ -668,7 +643,6 @@ struct fec_enet_private {
struct pm_qos_request pm_qos_req;
unsigned int tx_align;
-unsigned int rx_align;
/* hw interrupt coalesce */
unsigned int rx_pkts_itr;

drivers/net/ethernet/freescale/fec_main.c

@@ -253,9 +253,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* size bits. Other FEC hardware does not, so we need to take that into
* account when setting it.
*/
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
-defined(CONFIG_ARM64)
+#ifndef CONFIG_M5272
#define OPT_ARCH_HAS_MAX_FL 1
#else
#define OPT_ARCH_HAS_MAX_FL 0
@@ -1012,7 +1010,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
-bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
rxq->bd.cur = rxq->bd.base;
}
@@ -1062,7 +1060,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
-bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
txq->dirty_tx = bdp;
}
}
@@ -1657,8 +1655,7 @@ static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
if (unlikely(!new_page))
return -ENOMEM;
-rxq->rx_skb_info[index].page = new_page;
-rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+rxq->rx_buf[index] = new_page;
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
@@ -1773,7 +1770,6 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
__fec32 cbd_bufaddr;
u32 sub_len = 4;
-#if !defined(CONFIG_M5272)
/* If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of
* FEC_RACC_SHIFT16 is set by default in the probe function.
*/
@@ -1781,7 +1777,6 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
data_start += 2;
sub_len += 2;
}
-#endif
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
/*
@@ -1840,7 +1835,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
ndev->stats.rx_bytes -= 2;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
-page = rxq->rx_skb_info[index].page;
+page = rxq->rx_buf[index];
cbd_bufaddr = bdp->cbd_bufaddr;
if (fec_enet_update_cbd(rxq, bdp, index)) {
ndev->stats.rx_dropped++;
@@ -2517,9 +2512,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
phy_set_max_speed(phy_dev, 1000);
phy_remove_link_mode(phy_dev,
ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-#if !defined(CONFIG_M5272)
phy_support_sym_pause(phy_dev);
-#endif
}
else
phy_set_max_speed(phy_dev, 100);
@@ -2710,9 +2703,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
}
/* List of registers that can safely be read to dump them with ethtool */
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
-defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
static __u32 fec_enet_register_version = 2;
static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
@@ -2786,30 +2777,22 @@ static u32 fec_enet_register_offset[] = {
static void fec_enet_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *regbuf)
{
+u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
struct fec_enet_private *fep = netdev_priv(ndev);
u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+u32 *reg_list = fec_enet_register_offset;
struct device *dev = &fep->pdev->dev;
u32 *buf = (u32 *)regbuf;
u32 i, off;
int ret;
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
-defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
-u32 *reg_list;
-u32 reg_cnt;
-if (!of_machine_is_compatible("fsl,imx6ul")) {
-reg_list = fec_enet_register_offset;
-reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
-} else {
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
+if (of_machine_is_compatible("fsl,imx6ul")) {
reg_list = fec_enet_register_offset_6ul;
reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
}
-#else
-/* coldfire */
-static u32 *reg_list = fec_enet_register_offset;
-static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
#endif
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;
@@ -3328,7 +3311,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
for (i = 0; i < rxq->bd.ring_size; i++)
-page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
+page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
+false);
for (i = 0; i < XDP_STATS_TOTAL; i++)
rxq->stats[i] = 0;
@@ -3454,6 +3438,19 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
return err;
}
+/* Some platforms require the RX buffer to be 64-byte aligned, some
+ * require 16-byte alignment, and some require 4-byte alignment. But
+ * since the page pool was introduced into the driver, the RX buffer
+ * address is always the page address plus FEC_ENET_XDP_HEADROOM, and
+ * FEC_ENET_XDP_HEADROOM is 256 bytes, so this address satisfies all
+ * platforms. To prevent future modifications to FEC_ENET_XDP_HEADROOM
+ * from ignoring this hardware limitation, a BUILD_BUG_ON() test has
+ * been added, which ensures that FEC_ENET_XDP_HEADROOM provides the
+ * required alignment.
+ */
+BUILD_BUG_ON(FEC_ENET_XDP_HEADROOM & 0x3f);
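+/* Note: FEC_ENET_XDP_HEADROOM is 256 bytes (0x100), and 0x100 & 0x3f == 0,
+ * so the 64-byte, 16-byte, and 4-byte requirements above are all met.
+ */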
for (i = 0; i < rxq->bd.ring_size; i++) {
page = page_pool_dev_alloc_pages(rxq->page_pool);
if (!page)
@@ -3462,8 +3459,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
-rxq->rx_skb_info[i].page = page;
-rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
+rxq->rx_buf[i] = page;
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) {
@@ -3476,7 +3472,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
-bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
return 0;
err_alloc:
@@ -3512,7 +3508,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
-bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
return 0;
@@ -4089,10 +4085,8 @@ static int fec_enet_init(struct net_device *ndev)
WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-fep->rx_align = 0xf;
fep->tx_align = 0xf;
#else
-fep->rx_align = 0x3;
fep->tx_align = 0x3;
#endif
fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
@@ -4181,10 +4175,8 @@ static int fec_enet_init(struct net_device *ndev)
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
-if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES)
fep->tx_align = 0;
-fep->rx_align = 0x3f;
-}
ndev->hw_features = ndev->features;
@@ -4402,11 +4394,9 @@ fec_probe(struct platform_device *pdev)
fep->num_rx_queues = num_rx_qs;
fep->num_tx_queues = num_tx_qs;
-#if !defined(CONFIG_M5272)
/* default enable pause frame auto negotiation */
if (fep->quirks & FEC_QUIRK_HAS_GBIT)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
-#endif
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);