Mirror of https://github.com/torvalds/linux.git
ARM: dma-mapping: Switch to physical address mapping callbacks
Combine the resource and page mapping routines into one function that handles both flows in the same manner. This conversion allows us to remove the .map_resource/.unmap_resource callbacks completely.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251015-remove-map-page-v5-4-3bbfe3a25cdf@kernel.org
Commit 50b149be07, parent 52c9aa1adc, committed by Marek Szyprowski.
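For orientation before the diff: the change collapses two mapping callbacks into one. The sketch below restates the function signatures that appear in the diff using local stand-in types (the typedefs and the enum are simplified placeholders, not the kernel's definitions), so it compiles on its own but is only a shape summary.

#include <stddef.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;    /* stand-in for the kernel's dma_addr_t */
typedef uint64_t phys_addr_t;   /* stand-in for the kernel's phys_addr_t */
struct device;
struct page;
enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Before: separate callbacks for struct-page memory and for MMIO resources. */
dma_addr_t (*map_page)(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs);
dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs);

/* After: one physical-address callback; DMA_ATTR_MMIO marks the resource case. */
dma_addr_t (*map_phys)(struct device *dev, phys_addr_t phys, size_t size,
                enum dma_data_direction dir, unsigned long attrs);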
arch/arm/mm/dma-mapping.c

@@ -732,6 +732,9 @@ static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
 	if (attrs & DMA_ATTR_PRIVILEGED)
 		prot |= IOMMU_PRIV;
 
+	if (attrs & DMA_ATTR_MMIO)
+		prot |= IOMMU_MMIO;
+
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
 		return prot | IOMMU_READ | IOMMU_WRITE;
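The new branch folds the IOMMU_MMIO bit into the protection flags that were previously forced only by the resource path. Below is a standalone model of the resulting composition; the direction branches the hunk does not show are reconstructed from the kernel's usual direction-to-permission mapping, and the flag values are illustrative stand-ins for the constants in <linux/iommu.h> and <linux/dma-mapping.h> (the DMA_ATTR_MMIO value in particular is assumed).

#include <stdio.h>

#define IOMMU_READ              (1 << 0)
#define IOMMU_WRITE             (1 << 1)
#define IOMMU_MMIO              (1 << 4)
#define IOMMU_PRIV              (1 << 5)
#define DMA_ATTR_PRIVILEGED     (1UL << 9)
#define DMA_ATTR_MMIO           (1UL << 10)     /* assumed value */

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

static int info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
        int prot = 0;

        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;
        if (attrs & DMA_ATTR_MMIO)      /* the branch this hunk adds */
                prot |= IOMMU_MMIO;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:     /* device reads from memory */
                return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:   /* device writes to memory */
                return prot | IOMMU_WRITE;
        default:
                return 0;
        }
}

int main(void)
{
        /* An MMIO bidirectional mapping picks up IOMMU_MMIO automatically. */
        printf("prot = %#x\n", info_to_prot(DMA_BIDIRECTIONAL, DMA_ATTR_MMIO));
        return 0;
}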
@@ -1350,25 +1353,27 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
 }
 
 /**
- * arm_iommu_map_page
+ * arm_iommu_map_phys
  * @dev: valid struct device pointer
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
+ * @phys: physical address that buffer resides in
  * @size: size of buffer to map
  * @dir: DMA transfer direction
+ * @attrs: DMA mapping attributes
 *
 * IOMMU aware version of arm_dma_map_page()
 */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     unsigned long attrs)
+static dma_addr_t arm_iommu_map_phys(struct device *dev, phys_addr_t phys,
+	size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+	int len = PAGE_ALIGN(size + offset_in_page(phys));
+	phys_addr_t addr = phys & PAGE_MASK;
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
 
-	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_device(page_to_phys(page) + offset, size, dir);
+	int ret, prot;
+
+	if (!dev->dma_coherent &&
+	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+		arch_sync_dma_for_device(phys, size, dir);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_MAPPING_ERROR)
@@ -1376,12 +1381,11 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 
 	prot = __dma_info_to_prot(dir, attrs);
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
-			prot, GFP_KERNEL);
+	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + offset_in_page(phys);
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return DMA_MAPPING_ERROR;
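Taken together, the two hunks above preserve the original sub-page behaviour: the IOVA allocation is rounded up to whole pages, the IOMMU mapping starts at the page-aligned physical address, and the returned handle carries the intra-page offset. A standalone sketch of that arithmetic, with the PAGE_* helpers redefined locally for a 4 KiB page (the kernel supplies its own definitions):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       4096ULL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define offset_in_page(p)       ((uint64_t)(p) & (PAGE_SIZE - 1))

int main(void)
{
        uint64_t phys = 0x80001234;     /* example: buffer starting mid-page */
        uint64_t size = 0x2000;         /* 8 KiB */

        uint64_t len  = PAGE_ALIGN(size + offset_in_page(phys)); /* 0x3000 */
        uint64_t addr = phys & PAGE_MASK;                         /* 0x80001000 */

        /* The IOMMU maps [addr, addr + len); the returned DMA address is the
         * start of the IOVA range plus the preserved intra-page offset. */
        printf("len=%#llx addr=%#llx handle_off=%#llx\n",
               (unsigned long long)len, (unsigned long long)addr,
               (unsigned long long)offset_in_page(phys));
        return 0;
}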
@@ -1393,10 +1397,11 @@ fail:
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
+ * @attrs: DMA mapping attributes
 *
- * IOMMU aware version of arm_dma_unmap_page()
+ * IOMMU aware version of arm_dma_unmap_phys()
 */
-static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+static void arm_iommu_unmap_phys(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
@@ -1407,7 +1412,8 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	if (!iova)
 		return;
 
-	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+	if (!dev->dma_coherent &&
+	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
 		phys_addr_t phys = iommu_iova_to_phys(mapping->domain, iova);
 
 		arch_sync_dma_for_cpu(phys + offset, size, dir);
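On the unmap side the intra-page offset is recovered from the DMA handle itself, so the whole page-rounded IOVA range can be released; the lines that compute iova, offset, and len sit outside this hunk, so the fragment below is a hedged reconstruction of that recovery, reusing the PAGE_* helpers from the previous sketch.

/* Fragment, assumes the PAGE_* helpers defined in the previous sketch. */
static void unmap_bounds(uint64_t handle, uint64_t size)
{
        uint64_t iova   = handle & PAGE_MASK;           /* page-aligned IOVA */
        uint64_t offset = handle & (PAGE_SIZE - 1);     /* intra-page offset */
        uint64_t len    = PAGE_ALIGN(size + offset);    /* whole pages to free */

        printf("unmap [%#llx, +%#llx)\n",
               (unsigned long long)iova, (unsigned long long)len);
}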
@@ -1417,63 +1423,6 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	__free_iova(mapping, iova, len);
 }
 
-/**
- * arm_iommu_map_resource - map a device resource for DMA
- * @dev: valid struct device pointer
- * @phys_addr: physical address of resource
- * @size: size of resource to map
- * @dir: DMA transfer direction
- */
-static dma_addr_t arm_iommu_map_resource(struct device *dev,
-		phys_addr_t phys_addr, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t dma_addr;
-	int ret, prot;
-	phys_addr_t addr = phys_addr & PAGE_MASK;
-	unsigned int offset = phys_addr & ~PAGE_MASK;
-	size_t len = PAGE_ALIGN(size + offset);
-
-	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_MAPPING_ERROR)
-		return dma_addr;
-
-	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
-
-	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
-	if (ret < 0)
-		goto fail;
-
-	return dma_addr + offset;
-fail:
-	__free_iova(mapping, dma_addr, len);
-	return DMA_MAPPING_ERROR;
-}
-
-/**
- * arm_iommu_unmap_resource - unmap a device DMA resource
- * @dev: valid struct device pointer
- * @dma_handle: DMA address to resource
- * @size: size of resource to map
- * @dir: DMA transfer direction
- */
-static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t iova = dma_handle & PAGE_MASK;
-	unsigned int offset = dma_handle & ~PAGE_MASK;
-	size_t len = PAGE_ALIGN(size + offset);
-
-	if (!iova)
-		return;
-
-	iommu_unmap(mapping->domain, iova, len);
-	__free_iova(mapping, iova, len);
-}
-
 static void arm_iommu_sync_single_for_cpu(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
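Nothing the deleted resource routines did is lost: with DMA_ATTR_MMIO set, __dma_info_to_prot() supplies the IOMMU_MMIO bit (first hunk) and arm_iommu_map_phys() skips the cache maintenance that MMIO regions should not receive, while the IOVA arithmetic is identical. Conceptually the two calls below are equivalent; both functions are static, so this is a hedged illustration of the equivalence, not real call sites.

/* Before: the dedicated resource routine forced IOMMU_MMIO internally. */
dma = arm_iommu_map_resource(dev, phys, size, DMA_BIDIRECTIONAL, 0);

/* After: the same behaviour falls out of the common phys path. */
dma = arm_iommu_map_phys(dev, phys, size, DMA_BIDIRECTIONAL, DMA_ATTR_MMIO);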
@@ -1510,8 +1459,8 @@ static const struct dma_map_ops iommu_ops = {
 	.mmap			= arm_iommu_mmap_attrs,
 	.get_sgtable		= arm_iommu_get_sgtable,
 
-	.map_page		= arm_iommu_map_page,
-	.unmap_page		= arm_iommu_unmap_page,
+	.map_phys		= arm_iommu_map_phys,
+	.unmap_phys		= arm_iommu_unmap_phys,
 	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
 	.sync_single_for_device	= arm_iommu_sync_single_for_device,
 
@@ -1519,9 +1468,6 @@ static const struct dma_map_ops iommu_ops = {
 	.unmap_sg		= arm_iommu_unmap_sg,
 	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
-
-	.map_resource		= arm_iommu_map_resource,
-	.unmap_resource		= arm_iommu_unmap_resource,
 };
 
 /**