drm/amdgpu: avoid memory allocation in the critical code path v3
When we run out of VMIDs we need to wait for some to become available.

Previously we were using a dma_fence_array for that, but this means that we
have to allocate memory.

Instead just wait for the first not signaled fence from the least recently
used VMID to signal. That is not as efficient since we end up in this
function multiple times again, but allocating memory can easily fail or
deadlock if we have to wait for memory to become available.

v2: remove now unused VM manager fields
v3: fix dma_fence reference

Signed-off-by: Christian König <christian.koenig@amd.com>
Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4258
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
committed by Alex Deucher
parent f8bdb559c0
commit 20459c098d
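The idea of the patch can be illustrated outside the driver. The sketch below is not amdgpu code: the names example_vmid and example_pick_wait_fence are made up for illustration, and the data structure is reduced to an LRU list with one active fence per entry. It shows the allocation-free pattern the commit switches to: instead of collecting every busy fence into a dma_fence_array (which needs a kmalloc), hand the caller a single reference to the least recently used VMID's fence and let it wait and retry.

#include <linux/list.h>
#include <linux/dma-fence.h>

/* Illustrative only; not part of amdgpu. */
struct example_vmid {
	struct list_head list;		/* position in the LRU list, head = least recently used */
	struct dma_fence *active;	/* last fence still using this VMID, may be NULL */
};

/*
 * Return NULL when an idle VMID exists, otherwise a reference to the fence
 * of the least recently used VMID for the caller to wait on before retrying.
 */
static struct dma_fence *example_pick_wait_fence(struct list_head *lru)
{
	struct example_vmid *id;
	struct dma_fence *fence = NULL;

	/* Walk from most to least recently used; the last fence recorded
	 * belongs to the least recently used, still busy entry. */
	list_for_each_entry_reverse(id, lru, list) {
		if (!id->active || dma_fence_is_signaled(id->active))
			return NULL;	/* idle VMID found, nothing to wait for */
		fence = id->active;
	}

	/* No allocation, no failure path: just one extra fence reference. */
	return dma_fence_get(fence);
}

As the commit message notes, the trade-off is that the caller may re-enter the VMID grab path several times before an ID frees up, but nothing on that path can fail or deadlock waiting for memory.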
@@ -201,58 +201,34 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->vm_hub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-	struct dma_fence **fences;
-	unsigned i;
 
 	/* If anybody is waiting for a VMID let everybody wait for fairness */
 	if (!dma_fence_is_signaled(ring->vmid_wait)) {
 		*fence = dma_fence_get(ring->vmid_wait);
 		return 0;
 	}
 
-	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_NOWAIT);
-	if (!fences)
-		return -ENOMEM;
-
 	/* Check if we have an idle VMID */
-	i = 0;
-	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
+	list_for_each_entry_reverse((*idle), &id_mgr->ids_lru, list) {
 		/* Don't use per engine and per process VMID at the same time */
 		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
 			NULL : ring;
 
-		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
-		if (!fences[i])
-			break;
-		++i;
+		*fence = amdgpu_sync_peek_fence(&(*idle)->active, r);
+		if (!(*fence))
+			return 0;
 	}
 
-	/* If we can't find a idle VMID to use, wait till one becomes available */
-	if (&(*idle)->list == &id_mgr->ids_lru) {
-		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
-		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
-		struct dma_fence_array *array;
-		unsigned j;
-
-		*idle = NULL;
-		for (j = 0; j < i; ++j)
-			dma_fence_get(fences[j]);
-
-		array = dma_fence_array_create(i, fences, fence_context,
-					       seqno, true);
-		if (!array) {
-			for (j = 0; j < i; ++j)
-				dma_fence_put(fences[j]);
-			kfree(fences);
-			return -ENOMEM;
-		}
-
-		*fence = dma_fence_get(&array->base);
-		dma_fence_put(ring->vmid_wait);
-		ring->vmid_wait = &array->base;
-		return 0;
-	}
-	kfree(fences);
+	/*
+	 * If we can't find a idle VMID to use, wait on a fence from the least
+	 * recently used in the hope that it will be available soon.
+	 */
+	*idle = NULL;
+	dma_fence_put(ring->vmid_wait);
+	ring->vmid_wait = dma_fence_get(*fence);
 
+	/* This is the reference we return */
+	dma_fence_get(*fence);
 	return 0;
 }
@@ -2843,8 +2843,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  */
 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 {
-	unsigned i;
-
 	/* Concurrent flushes are only possible starting with Vega10 and
 	 * are broken on Navi10 and Navi14.
 	 */
@@ -2853,11 +2851,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 		       adev->asic_type == CHIP_NAVI14);
 	amdgpu_vmid_mgr_init(adev);
 
-	adev->vm_manager.fence_context =
-		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		adev->vm_manager.seqno[i] = 0;
-
 	spin_lock_init(&adev->vm_manager.prt_lock);
 	atomic_set(&adev->vm_manager.num_prt_users, 0);
 
@@ -453,10 +453,6 @@ struct amdgpu_vm_manager {
 	unsigned int				first_kfd_vmid;
 	bool					concurrent_flush;
 
-	/* Handling of VM fences */
-	u64					fence_context;
-	unsigned				seqno[AMDGPU_MAX_RINGS];
-
 	uint64_t				max_pfn;
 	uint32_t				num_level;
 	uint32_t				block_size;
 