Mirror of https://github.com/torvalds/linux.git · synced 2025-12-07 20:06:24 +00:00
We seem to be missing a netif_running() check from the devmem
installation path. Starting a queue on a stopped device makes no sense.
We still want to be able to allocate the memory, just to test that the
device is indeed setting up the page pools in a memory provider
compatible way.

This is not a bug fix, because existing drivers check if the interface
is down as part of the ops. But new drivers shouldn't have to do this,
as long as they can correctly alloc/free while down.

Reviewed-by: Mina Almasry <almasrymina@google.com>
Link: https://patch.msgid.link/20250206225638.1387810-3-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
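In practice this means the core only invokes a driver's stop/start
callbacks while the interface is up, so a new driver merely has to make
its alloc/free callbacks safe to run on a down device. A minimal sketch
of how such a driver might wire up its ops, assuming the
netdev_queue_mgmt_ops layout from <net/netdev_queues.h> (all foo_*
names are hypothetical):

	struct foo_queue_mem {
		struct page_pool *pool;
		/* descriptor rings, DMA buffers, etc. */
	};

	static int foo_queue_mem_alloc(struct net_device *dev,
				       void *per_queue_mem, int idx)
	{
		/* Allocate rings and the page pool only; no register
		 * writes, so this is safe while the interface is down.
		 */
		return 0;
	}

	static void foo_queue_mem_free(struct net_device *dev,
				       void *per_queue_mem)
	{
		/* Undo foo_queue_mem_alloc(); also safe while down. */
	}

	static int foo_queue_start(struct net_device *dev,
				   void *per_queue_mem, int idx)
	{
		/* Point the hardware at the new rings; only reached
		 * when netif_running() is true.
		 */
		return 0;
	}

	static int foo_queue_stop(struct net_device *dev,
				  void *per_queue_mem, int idx)
	{
		/* Quiesce the hardware queue and save its state into
		 * per_queue_mem so it can be freed or restarted.
		 */
		return 0;
	}

	static const struct netdev_queue_mgmt_ops foo_queue_mgmt_ops = {
		.ndo_queue_mem_size	= sizeof(struct foo_queue_mem),
		.ndo_queue_mem_alloc	= foo_queue_mem_alloc,
		.ndo_queue_mem_free	= foo_queue_mem_free,
		.ndo_queue_start	= foo_queue_start,
		.ndo_queue_stop		= foo_queue_stop,
	};

With ops shaped like this, netdev_rx_queue_restart() below can exercise
foo_queue_mem_alloc()/foo_queue_mem_free() even while the device is
down, and only calls foo_queue_start()/foo_queue_stop() when
netif_running() is true.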
155 lines · 3.7 KiB · C
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/netdevice.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/memory_provider.h>

#include "page_pool_priv.h"
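
/* Replace the memory backing RX queue @rxq_idx: allocate fresh
 * per-queue memory, swap it in (stopping and restarting the queue if
 * the device is running), then free the old memory. Called under
 * rtnl_lock, see ASSERT_RTNL() below.
 */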
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err;
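
	/* Restarting requires the full set of queue management ops. */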
	if (!qops || !qops->ndo_queue_stop || !qops->ndo_queue_mem_free ||
	    !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
		return -EOPNOTSUPP;

	ASSERT_RTNL();
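
	/* Scratch space for the driver's opaque per-queue state; the
	 * driver advertises its size via ndo_queue_mem_size.
	 */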
	new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem)
		return -ENOMEM;

	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!old_mem) {
		err = -ENOMEM;
		goto err_free_new_mem;
	}

	err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
	if (err)
		goto err_free_old_mem;

	err = page_pool_check_memory_provider(dev, rxq);
	if (err)
		goto err_free_new_queue_mem;
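
	/* Stop/start the hardware queue only if the interface is up.
	 * If it is down there is nothing to restart, so just swap the
	 * buffers and let the new memory be freed below; allocating it
	 * above has still exercised the driver's alloc path.
	 */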
	if (netif_running(dev)) {
		err = qops->ndo_queue_stop(dev, old_mem, rxq_idx);
		if (err)
			goto err_free_new_queue_mem;

		err = qops->ndo_queue_start(dev, new_mem, rxq_idx);
		if (err)
			goto err_start_queue;
	} else {
		swap(new_mem, old_mem);
	}

	qops->ndo_queue_mem_free(dev, old_mem);

	kvfree(old_mem);
	kvfree(new_mem);

	return 0;

err_start_queue:
	/* Restarting the queue with old_mem should be successful as we haven't
	 * changed any of the queue configuration, and there is not much we can
	 * do to recover from a failure here.
	 *
	 * WARN if we fail to recover the old rx queue, and at least free
	 * old_mem so we don't also leak that.
	 */
	if (qops->ndo_queue_start(dev, old_mem, rxq_idx)) {
		WARN(1,
		     "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
		     rxq_idx);
		qops->ndo_queue_mem_free(dev, old_mem);
	}

err_free_new_queue_mem:
	qops->ndo_queue_mem_free(dev, new_mem);

err_free_old_mem:
	kvfree(old_mem);

err_free_new_mem:
	kvfree(new_mem);

	return err;
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
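
/* Bind a memory provider to RX queue @ifq_idx and restart the queue so
 * its page pool is rebuilt on top of the provider; the params are
 * rolled back if the restart fails.
 */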
static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
			     struct pp_memory_provider_params *p)
{
	struct netdev_rx_queue *rxq;
	int ret;

	if (ifq_idx >= dev->real_num_rx_queues)
		return -EINVAL;
	ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues);

	rxq = __netif_get_rx_queue(dev, ifq_idx);
	if (rxq->mp_params.mp_ops)
		return -EEXIST;

	rxq->mp_params = *p;
	ret = netdev_rx_queue_restart(dev, ifq_idx);
	if (ret) {
		rxq->mp_params.mp_ops = NULL;
		rxq->mp_params.mp_priv = NULL;
	}
	return ret;
}

int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
		    struct pp_memory_provider_params *p)
{
	int ret;

	rtnl_lock();
	ret = __net_mp_open_rxq(dev, ifq_idx, p);
	rtnl_unlock();
	return ret;
}
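
/* Unbind the memory provider from RX queue @ifq_idx: clear the params
 * and restart the queue so it falls back to regular page pool memory.
 */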
static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
			       struct pp_memory_provider_params *old_p)
{
	struct netdev_rx_queue *rxq;

	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
		return;

	rxq = __netif_get_rx_queue(dev, ifq_idx);

	/* Callers holding a netdev ref may get here after we already
	 * went thru shutdown via dev_memory_provider_uninstall().
	 */
	if (dev->reg_state > NETREG_REGISTERED &&
	    !rxq->mp_params.mp_ops)
		return;

	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;

	rxq->mp_params.mp_ops = NULL;
	rxq->mp_params.mp_priv = NULL;
	WARN_ON(netdev_rx_queue_restart(dev, ifq_idx));
}

void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
		      struct pp_memory_provider_params *old_p)
{
	rtnl_lock();
	__net_mp_close_rxq(dev, ifq_idx, old_p);
	rtnl_unlock();
}