drm/panfrost: Handle page mapping failure
When mapping the pages of a BO, either a heap BO at page fault time or a non-heap BO at object creation time, if the ARM page table mapping function fails, we unmap what had been mapped so far and bail out.

Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
Link: https://lore.kernel.org/r/20251019145225.3621989-7-adrian.larumbe@collabora.com
Signed-off-by: Steven Price <steven.price@arm.com>
committed by Steven Price
parent 3d7c626716
commit 4da352cf8f
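The patch applies a standard map-then-roll-back pattern: mmu_map_sg() keeps a running total of how much it has mapped, and on the first map_pages() failure the new mmu_unmap_range() helper tears that much down again before the error is returned to the caller. Below is a minimal, self-contained sketch of that pattern, not the driver code itself; demo_map_chunk(), demo_unmap_chunk() and the injected failure are invented stand-ins for the io-pgtable map_pages()/unmap_pages() calls.

/*
 * Minimal sketch of the map-with-rollback pattern (assumed example code,
 * not part of the driver). demo_map_chunk()/demo_unmap_chunk() stand in
 * for io-pgtable's map_pages()/unmap_pages().
 */
#include <errno.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define CHUNK_SIZE 0x1000u	/* pretend 4 KiB pages */
#define FAIL_AT    3		/* artificially fail on the 4th chunk */

static int demo_map_chunk(uint64_t iova, unsigned int idx)
{
	if (idx == FAIL_AT)
		return -ENOMEM;	/* injected failure */
	printf("  mapped   iova 0x%llx\n", (unsigned long long)iova);
	return 0;
}

static void demo_unmap_chunk(uint64_t iova)
{
	printf("  unmapped iova 0x%llx\n", (unsigned long long)iova);
}

/* Counterpart of the new mmu_unmap_range(): walk back over what was mapped. */
static void demo_unmap_range(uint64_t iova, size_t len)
{
	size_t done = 0;

	while (done < len) {
		demo_unmap_chunk(iova);
		iova += CHUNK_SIZE;
		done += CHUNK_SIZE;
	}
}

/* Counterpart of mmu_map_sg(): map chunks, roll back on the first error. */
static int demo_map_range(uint64_t iova, unsigned int nchunks)
{
	uint64_t start_iova = iova;
	size_t total_mapped = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < nchunks; i++) {
		ret = demo_map_chunk(iova, i);
		if (ret)
			goto err_unmap;
		total_mapped += CHUNK_SIZE;
		iova += CHUNK_SIZE;
	}
	return 0;

err_unmap:
	demo_unmap_range(start_iova, total_mapped);
	return ret;
}

int main(void)
{
	printf("demo_map_range() returned %d\n",
	       demo_map_range(0x8000000ull, 6));
	return 0;
}

Chunks 0 to 2 map successfully, the injected failure on chunk 3 triggers the rollback, and the program prints three unmaps before reporting the error; the err_unmap_pages label in the real mmu_map_sg() plays the same role.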
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -395,13 +395,32 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
 	pm_runtime_put_autosuspend(pfdev->base.dev);
 }
 
+static void mmu_unmap_range(struct panfrost_mmu *mmu, u64 iova, size_t len)
+{
+	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
+	size_t pgsize, unmapped_len = 0;
+	size_t unmapped_page, pgcount;
+
+	while (unmapped_len < len) {
+		pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
+
+		unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
+		WARN_ON(unmapped_page != pgsize * pgcount);
+
+		iova += pgsize * pgcount;
+		unmapped_len += pgsize * pgcount;
+	}
+}
+
 static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 		      u64 iova, int prot, struct sg_table *sgt)
 {
 	unsigned int count;
 	struct scatterlist *sgl;
 	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
+	size_t total_mapped = 0;
 	u64 start_iova = iova;
+	int ret;
 
 	for_each_sgtable_dma_sg(sgt, sgl, count) {
 		unsigned long paddr = sg_dma_address(sgl);
@@ -415,10 +434,14 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 			size_t pgcount, mapped = 0;
 			size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
 
-			ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
-				       GFP_KERNEL, &mapped);
+			ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
+					     GFP_KERNEL, &mapped);
+			if (ret)
+				goto err_unmap_pages;
+
 			/* Don't get stuck if things have gone wrong */
 			mapped = max(mapped, pgsize);
+			total_mapped += mapped;
 			iova += mapped;
 			paddr += mapped;
 			len -= mapped;
@@ -428,6 +451,10 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
 
 	return 0;
+
+err_unmap_pages:
+	mmu_unmap_range(mmu, start_iova, total_mapped);
+	return ret;
 }
 
 int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
@@ -438,6 +465,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
 	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
 	struct sg_table *sgt;
 	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
+	int ret;
 
 	if (WARN_ON(mapping->active))
 		return 0;
@@ -449,11 +477,18 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
 	if (WARN_ON(IS_ERR(sgt)))
 		return PTR_ERR(sgt);
 
-	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
-		   prot, sgt);
+	ret = mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+			 prot, sgt);
+	if (ret)
+		goto err_put_pages;
+
 	mapping->active = true;
 
 	return 0;
+
+err_put_pages:
+	drm_gem_shmem_put_pages_locked(shmem);
+	return ret;
 }
 
 void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
@@ -638,8 +673,10 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	if (ret)
 		goto err_map;
 
-	mmu_map_sg(pfdev, bomapping->mmu, addr,
-		   IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt);
+	ret = mmu_map_sg(pfdev, bomapping->mmu, addr,
+			 IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt);
+	if (ret)
+		goto err_mmu_map_sg;
 
 	bomapping->active = true;
 	bo->heap_rss_size += SZ_2M;
@@ -653,6 +690,8 @@ out:
 
 	return 0;
 
+err_mmu_map_sg:
+	dma_unmap_sgtable(pfdev->base.dev, sgt, DMA_BIDIRECTIONAL, 0);
 err_map:
 	sg_free_table(sgt);
 err_unlock:
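The new mmu_unmap_range() relies on the driver's existing get_pgsize() helper to decide, on each loop iteration, how large a granule (and how many of them) the next stretch of the range can be processed with. The helper below is an illustrative, simplified take on that idea, assuming only 4 KiB pages and 2 MiB blocks; it is a sketch written for this note, not the driver's get_pgsize().

/*
 * Illustrative chunk-size selection in the spirit of get_pgsize() (assumed
 * example code): pick the largest granule the current position allows and
 * report how many of them fit before the next alignment boundary.
 * Lengths are assumed to be multiples of 4 KiB, as in the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000ull
#define SZ_2M 0x200000ull

static uint64_t pick_pgsize(uint64_t iova, uint64_t len, uint64_t *count)
{
	/* Distance from iova up to the next 2 MiB boundary (0 if aligned). */
	uint64_t to_blk = (-iova) % SZ_2M;

	if (to_blk || len < SZ_2M) {
		/* Not block-aligned (or too short): 4 KiB pages up to the
		 * boundary or the end of the range, whichever comes first. */
		uint64_t span = to_blk ? (to_blk < len ? to_blk : len) : len;

		*count = span / SZ_4K;
		return SZ_4K;
	}

	/* Block-aligned and long enough: use as many 2 MiB blocks as fit. */
	*count = len / SZ_2M;
	return SZ_2M;
}

int main(void)
{
	uint64_t iova = 0x10003000ull;	/* deliberately not 2 MiB aligned */
	uint64_t len = 0x600000ull;	/* 6 MiB */

	while (len) {
		uint64_t count;
		uint64_t pgsize = pick_pgsize(iova, len, &count);

		printf("iova 0x%llx: %llu x 0x%llx\n",
		       (unsigned long long)iova,
		       (unsigned long long)count,
		       (unsigned long long)pgsize);

		iova += pgsize * count;
		len -= pgsize * count;
	}
	return 0;
}

Running it on a range that starts off-alignment shows the expected walk: 4 KiB pages up to the first 2 MiB boundary, 2 MiB blocks through the middle, and 4 KiB pages for the tail.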