drm/nouveau/uvmm: Allow larger pages

Now that everything in UVMM knows about the variable page shift, we can
select larger values.

The proposed approach relies on nouveau_bo::page unless it would cause
alignment issues, in which case we fall back to searching for an
appropriate shift.
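
As a rough illustration of that fallback (a minimal sketch with
hypothetical values, not the driver code itself): a mapping only
qualifies for a given shift when its VA, range and GEM offset are all
multiples of 1 << shift, otherwise we step down until something fits:

    /* Minimal sketch with hypothetical values (not driver code): a BO that
     * prefers 64K pages (shift 16) but whose bind range is only 4K aligned
     * ends up falling back to the base 4K shift (12).
     */
    #include <stdint.h>
    #include <stdio.h>

    static int aligned_to_shift(uint64_t addr, uint64_t range,
                                uint64_t offset, unsigned int shift)
    {
            uint64_t mask = (1ULL << shift) - 1;

            return !(addr & mask) && !(range & mask) && !(offset & mask);
    }

    int main(void)
    {
            uint64_t addr = 0x100000, range = 0x11000, offset = 0;
            unsigned int shift;

            /* Try the preferred shift first, then smaller ones down to 4K. */
            for (shift = 16; shift > 12; shift--)
                    if (aligned_to_shift(addr, range, offset, shift))
                            break;

            printf("selected page shift: %u\n", shift); /* prints 12 */
            return 0;
    }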

Signed-off-by: Mary Guillemard <mary@mary.zone>
Co-developed-by: Mohamed Ahmed <mohamedahmedegypt2001@gmail.com>
Signed-off-by: Mohamed Ahmed <mohamedahmedegypt2001@gmail.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: James Jones <jajones@nvidia.com>
Signed-off-by: Lyude Paul <lyude@redhat.com>
Link: https://patch.msgid.link/20251110-nouveau-compv6-v6-2-83b05475f57c@mary.zone

@@ -454,6 +454,62 @@ op_unmap_prepare_unwind(struct drm_gpuva *va)
 	drm_gpuva_insert(va->vm, va);
 }
 
+static bool
+op_map_aligned_to_page_shift(const struct drm_gpuva_op_map *op, u8 page_shift)
+{
+	u64 non_page_bits = (1ULL << page_shift) - 1;
+
+	return (op->va.addr & non_page_bits) == 0 &&
+	       (op->va.range & non_page_bits) == 0 &&
+	       (op->gem.offset & non_page_bits) == 0;
+}
+
+static u8
+select_page_shift(struct nouveau_uvmm *uvmm, struct drm_gpuva_op_map *op)
+{
+	struct nouveau_bo *nvbo = nouveau_gem_object(op->gem.obj);
+
+	/* nouveau_bo_fixup_align() guarantees that the page size will be aligned
+	 * for most cases, but it can't handle cases where userspace allocates with
+	 * a size and then binds with a smaller granularity. So in order to avoid
+	 * breaking old userspace, we need to ensure that the VA is actually
+	 * aligned before using it, and if it isn't, then we downgrade to the first
+	 * granularity that will fit, which is optimal from a correctness and
+	 * performance perspective.
+	 */
+	if (op_map_aligned_to_page_shift(op, nvbo->page))
+		return nvbo->page;
+
+	struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
+	struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+	int i;
+
+	/* If the given granularity doesn't fit, let's find one that will fit. */
+	for (i = 0; i < vmm->page_nr; i++) {
+		/* Ignore anything that is bigger or identical to the BO preference. */
+		if (vmm->page[i].shift >= nvbo->page)
+			continue;
+
+		/* Skip incompatible domains. */
+		if ((mem->mem.type & NVIF_MEM_VRAM) && !vmm->page[i].vram)
+			continue;
+
+		if ((mem->mem.type & NVIF_MEM_HOST) &&
+		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+			continue;
+
+		/* If it fits, return the proposed shift. */
+		if (op_map_aligned_to_page_shift(op, vmm->page[i].shift))
+			return vmm->page[i].shift;
+	}
+
+	/* If we get here then nothing can reconcile the requirements. This should never
+	 * happen.
+	 */
+	drm_WARN_ONCE(op->gem.obj->dev, 1, "Could not find an appropriate page size.\n");
+	return PAGE_SHIFT;
+}
+
 static void
 nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
 			       struct nouveau_uvma_prealloc *new,
@@ -506,7 +562,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
 			if (vmm_get_range)
 				nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
 						     vmm_get_range,
-						     PAGE_SHIFT);
+						     select_page_shift(uvmm, &op->map));
 			break;
 		}
 		case DRM_GPUVA_OP_REMAP: {
@@ -599,7 +655,7 @@ op_map_prepare(struct nouveau_uvmm *uvmm,
 	uvma->region = args->region;
 	uvma->kind = args->kind;
-	uvma->page_shift = PAGE_SHIFT;
+	uvma->page_shift = select_page_shift(uvmm, op);
 	drm_gpuva_map(&uvmm->base, &uvma->va, op);