mirror of https://github.com/torvalds/linux.git
xen: swiotlb: Switch to physical address mapping callbacks
Combine the resource and page mapping routines into one function and
remove the .map_resource/.unmap_resource callbacks completely.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251015-remove-map-page-v5-5-3bbfe3a25cdf@kernel.org
commit af85de5a9f (parent 50b149be07), committed by Marek Szyprowski
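The shape of the change is easy to see in isolation. Below is a minimal sketch of the consolidated mapping callback, assuming the DMA_ATTR_MMIO attribute and the .map_phys/.unmap_phys dma_map_ops hooks introduced earlier in this series; it only illustrates the dispatch logic and is not the kernel code itself (see the actual diff that follows).

#include <linux/dma-mapping.h>	/* DMA_ATTR_MMIO, DMA_MAPPING_ERROR */
#include <linux/dma-direct.h>	/* dma_capable(), phys_to_dma() */

/*
 * Sketch: one physical-address-based callback covering both paths
 * that were previously split across .map_page and .map_resource.
 */
static dma_addr_t sketch_map_phys(struct device *dev, phys_addr_t phys,
				  size_t size, enum dma_data_direction dir,
				  unsigned long attrs)
{
	if (attrs & DMA_ATTR_MMIO) {
		/*
		 * Former .map_resource path: MMIO cannot be bounce
		 * buffered, so the physical address must already be
		 * reachable by the device.
		 */
		if (!dma_capable(dev, phys, size, false))
			return DMA_MAPPING_ERROR;
		return phys;
	}

	/*
	 * Former .map_page path: normal RAM; translate to a device
	 * address (the real code below additionally falls back to
	 * swiotlb bouncing when the address is outside the device's
	 * DMA window).
	 */
	return phys_to_dma(dev, phys);
}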
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -200,17 +200,32 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
  * physical address to use is returned.
  *
  * Once the device is given the dma address, the device owns this memory until
- * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
+ * either xen_swiotlb_unmap_phys or xen_swiotlb_dma_sync_single is performed.
  */
-static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
-				       unsigned long offset, size_t size,
-				       enum dma_data_direction dir,
+static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
+				       size_t size, enum dma_data_direction dir,
 				       unsigned long attrs)
 {
-	phys_addr_t map, phys = page_to_phys(page) + offset;
-	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
+	dma_addr_t dev_addr;
+	phys_addr_t map;
 
 	BUG_ON(dir == DMA_NONE);
 
+	if (attrs & DMA_ATTR_MMIO) {
+		if (unlikely(!dma_capable(dev, phys, size, false))) {
+			dev_err_once(
+				dev,
+				"DMA addr %pa+%zu overflow (mask %llx, bus limit %llx).\n",
+				&phys, size, *dev->dma_mask,
+				dev->bus_dma_limit);
+			WARN_ON_ONCE(1);
+			return DMA_MAPPING_ERROR;
+		}
+		return phys;
+	}
+
+	dev_addr = xen_phys_to_dma(dev, phys);
+
 	/*
 	 * If the address happens to be in the device's DMA window,
 	 * we can safely return the device addr and not worry about bounce
@@ -257,13 +272,13 @@ done:
 
 /*
  * Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous xen_swiotlb_map_page call. All
+ * match what was provided for in a previous xen_swiotlb_map_phys call. All
  * other usages are undefined.
  *
  * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+static void xen_swiotlb_unmap_phys(struct device *hwdev, dma_addr_t dev_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
@@ -325,7 +340,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 
 /*
  * Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
+ * concerning calls here are the same as for swiotlb_unmap_phys() above.
  */
 static void
 xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
@@ -337,7 +352,7 @@ xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i)
-		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
+		xen_swiotlb_unmap_phys(hwdev, sg->dma_address, sg_dma_len(sg),
 				dir, attrs);
 
 }
@@ -352,8 +367,8 @@ xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
-				sg->offset, sg->length, dir, attrs);
+		sg->dma_address = xen_swiotlb_map_phys(dev, sg_phys(sg),
+				sg->length, dir, attrs);
 		if (sg->dma_address == DMA_MAPPING_ERROR)
 			goto out_unmap;
 		sg_dma_len(sg) = sg->length;
@@ -392,25 +407,6 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 	}
 }
 
-static dma_addr_t xen_swiotlb_direct_map_resource(struct device *dev,
-						  phys_addr_t paddr,
-						  size_t size,
-						  enum dma_data_direction dir,
-						  unsigned long attrs)
-{
-	dma_addr_t dma_addr = paddr;
-
-	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
-		dev_err_once(dev,
-			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
-			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
-		WARN_ON_ONCE(1);
-		return DMA_MAPPING_ERROR;
-	}
-
-	return dma_addr;
-}
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly. For example, if your device can only drive the low 24-bits
@@ -437,13 +433,12 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
 	.map_sg = xen_swiotlb_map_sg,
 	.unmap_sg = xen_swiotlb_unmap_sg,
-	.map_page = xen_swiotlb_map_page,
-	.unmap_page = xen_swiotlb_unmap_page,
+	.map_phys = xen_swiotlb_map_phys,
+	.unmap_phys = xen_swiotlb_unmap_phys,
 	.dma_supported = xen_swiotlb_dma_supported,
 	.mmap = dma_common_mmap,
 	.get_sgtable = dma_common_get_sgtable,
 	.alloc_pages_op = dma_common_alloc_pages,
 	.free_pages = dma_common_free_pages,
 	.max_mapping_size = swiotlb_max_mapping_size,
-	.map_resource = xen_swiotlb_direct_map_resource,
 };
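Driver-visible call sites do not change. Roughly, and assuming the core-layer dispatch added earlier in this series (the actual wrappers live in kernel/dma/mapping.c; dev, page, offset, size and bar_phys below are hypothetical driver-side variables):

/* Streaming RAM mapping: the page is resolved to a physical address
 * and dispatched to ops->map_phys(dev, phys, size, dir, attrs). */
dma_addr_t ram = dma_map_page(dev, page, offset, size, DMA_TO_DEVICE);

/* MMIO/resource mapping: dispatched to the same ops->map_phys(),
 * with DMA_ATTR_MMIO set so bounce buffering is never attempted. */
dma_addr_t bar = dma_map_resource(dev, bar_phys, size, DMA_TO_DEVICE, 0);

Folding the MMIO case into xen_swiotlb_map_phys() is what makes the separate xen_swiotlb_direct_map_resource() and its .map_resource registration redundant, which is why the diff above removes them.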