mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
svcrdma: Release transport resources synchronously
svcrdma: Release transport resources synchronously

NFSD has always supported adding network listeners. The new netlink protocol now enables the removal of listeners.

Olga noticed that if an RDMA listener is removed and immediately re-added, the deferred __svc_rdma_free() function might not have run yet, so some or all of the old listener's RDMA resources linger, which prevents a new listener on the same address from being created.

Also, svc_xprt_free() does a module_put() just after calling ->xpo_free(). That means if there is deferred work going on, the module could be unloaded before that work is even started, resulting in a UAF.

Neil asks:
> What particular part of __svc_rdma_free() needs to run in order for a
> subsequent registration to succeed?
> Can that bit be run directly from svc_rdma_free() rather than be
> delayed?
> (I know almost nothing about rdma so forgive me if the answers to these
> questions seem obvious)

The reasons I can recall are:
- Some of the transport tear-down work can sleep
- Releasing a cm_id is tricky and can deadlock

We might be able to mitigate the second issue with judicious application of transport reference counting.

Reported-by: Olga Kornievskaia <okorniev@redhat.com>
Closes: https://lore.kernel.org/linux-nfs/20250821204328.89218-1-okorniev@redhat.com/
Suggested-by: NeilBrown <neil@brown.name>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
This commit is contained in:
@@ -591,12 +591,18 @@ static void svc_rdma_detach(struct svc_xprt *xprt)
|
||||
rdma_disconnect(rdma->sc_cm_id);
|
||||
}
|
||||
|
||||
static void __svc_rdma_free(struct work_struct *work)
|
||||
/**
|
||||
* svc_rdma_free - Release class-specific transport resources
|
||||
* @xprt: Generic svc transport object
|
||||
*/
|
||||
static void svc_rdma_free(struct svc_xprt *xprt)
|
||||
{
|
||||
struct svcxprt_rdma *rdma =
|
||||
container_of(work, struct svcxprt_rdma, sc_work);
|
||||
container_of(xprt, struct svcxprt_rdma, sc_xprt);
|
||||
struct ib_device *device = rdma->sc_cm_id->device;
|
||||
|
||||
might_sleep();
|
||||
|
||||
/* This blocks until the Completion Queues are empty */
|
||||
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
|
||||
ib_drain_qp(rdma->sc_qp);
|
||||
@@ -629,15 +635,6 @@ static void __svc_rdma_free(struct work_struct *work)
|
||||
kfree(rdma);
|
||||
}
|
||||
|
||||
static void svc_rdma_free(struct svc_xprt *xprt)
|
||||
{
|
||||
struct svcxprt_rdma *rdma =
|
||||
container_of(xprt, struct svcxprt_rdma, sc_xprt);
|
||||
|
||||
INIT_WORK(&rdma->sc_work, __svc_rdma_free);
|
||||
schedule_work(&rdma->sc_work);
|
||||
}
|
||||
|
||||
static int svc_rdma_has_wspace(struct svc_xprt *xprt)
|
||||
{
|
||||
struct svcxprt_rdma *rdma =
|
||||
|
||||
Reference in New Issue
Block a user