parisc: Convert DMA map_page to map_phys interface
Perform a mechanical conversion from the .map_page to the .map_phys callback.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251015-remove-map-page-v5-9-3bbfe3a25cdf@kernel.org
commit 96ddf2ef58
parent e4e3fff66a
committed by Marek Szyprowski
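The pattern is the same for every callback converted here: the old .map_page op took a struct page plus an offset and built a CPU virtual address before mapping, while the new .map_phys op receives the physical address directly and bails out on MMIO attributes this IOMMU cannot handle. A minimal sketch of the before/after shape (the foo_* names are placeholders for illustration, not part of this patch):

    /* Before: .map_page -- page + offset, flattened to a virtual address. */
    static dma_addr_t
    foo_map_page(struct device *dev, struct page *page, unsigned long offset,
                 size_t size, enum dma_data_direction dir, unsigned long attrs)
    {
            return foo_map_single(dev, page_address(page) + offset, size, dir);
    }

    /* After: .map_phys -- physical address passed straight through. */
    static dma_addr_t
    foo_map_phys(struct device *dev, phys_addr_t phys, size_t size,
                 enum dma_data_direction dir, unsigned long attrs)
    {
            if (unlikely(attrs & DMA_ATTR_MMIO))    /* no MMIO/P2P support */
                    return DMA_MAPPING_ERROR;

            return foo_map_single(dev, phys, size, dir);
    }

Where the driver still needs a kernel virtual address internally (the parisc LCI instruction wants one to compute the coherence index), the conversion uses phys_to_virt(), as in ccio_io_pdir_entry() and sba_io_pdir_entry() below.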
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -517,10 +517,10 @@ static u32 hint_lookup[] = {
 * ccio_io_pdir_entry - Initialize an I/O Pdir.
 * @pdir_ptr: A pointer into I/O Pdir.
 * @sid: The Space Identifier.
-* @vba: The virtual address.
+* @pba: The physical address.
 * @hints: The DMA Hint.
 *
-* Given a virtual address (vba, arg2) and space id, (sid, arg1),
+* Given a physical address (pba, arg2) and space id, (sid, arg1),
 * load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
 * entry consists of 8 bytes as shown below (MSB == bit 0):
 *
@@ -543,7 +543,7 @@ static u32 hint_lookup[] = {
 * index are bits 12:19 of the value returned by LCI.
 */
 static void
-ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
+ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
                    unsigned long hints)
 {
         register unsigned long pa;
@@ -557,7 +557,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
         ** "hints" parm includes the VALID bit!
         ** "dep" clobbers the physical address offset bits as well.
         */
-        pa = lpa(vba);
+        pa = pba;
         asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
         ((u32 *)pdir_ptr)[1] = (u32) pa;
 
@@ -582,7 +582,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
         ** Grab virtual index [0:11]
         ** Deposit virt_idx bits into I/O PDIR word
         */
-        asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
+        asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba)));
         asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
         asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
 
@@ -704,14 +704,14 @@ ccio_dma_supported(struct device *dev, u64 mask)
 /**
  * ccio_map_single - Map an address range into the IOMMU.
  * @dev: The PCI device.
- * @addr: The start address of the DMA region.
+ * @addr: The physical address of the DMA region.
  * @size: The length of the DMA region.
  * @direction: The direction of the DMA transaction (to/from device).
  *
  * This function implements the pci_map_single function.
  */
 static dma_addr_t
-ccio_map_single(struct device *dev, void *addr, size_t size,
+ccio_map_single(struct device *dev, phys_addr_t addr, size_t size,
                 enum dma_data_direction direction)
 {
         int idx;
@@ -730,7 +730,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
         BUG_ON(size <= 0);
 
         /* save offset bits */
-        offset = ((unsigned long) addr) & ~IOVP_MASK;
+        offset = offset_in_page(addr);
 
         /* round up to nearest IOVP_SIZE */
         size = ALIGN(size + offset, IOVP_SIZE);
@@ -746,15 +746,15 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
 
         pdir_start = &(ioc->pdir_base[idx]);
 
-        DBG_RUN("%s() %px -> %#lx size: %zu\n",
-                __func__, addr, (long)(iovp | offset), size);
+        DBG_RUN("%s() %pa -> %#lx size: %zu\n",
+                __func__, &addr, (long)(iovp | offset), size);
 
         /* If not cacheline aligned, force SAFE_DMA on the whole mess */
-        if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
+        if ((size % L1_CACHE_BYTES) || (addr % L1_CACHE_BYTES))
                 hint |= HINT_SAFE_DMA;
 
         while(size > 0) {
-                ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);
+                ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, hint);
 
                 DBG_RUN(" pdir %p %08x%08x\n",
                         pdir_start,
@@ -773,17 +773,18 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
 
 
 static dma_addr_t
-ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
-                size_t size, enum dma_data_direction direction,
-                unsigned long attrs)
+ccio_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+                enum dma_data_direction direction, unsigned long attrs)
 {
-        return ccio_map_single(dev, page_address(page) + offset, size,
-                        direction);
+        if (unlikely(attrs & DMA_ATTR_MMIO))
+                return DMA_MAPPING_ERROR;
+
+        return ccio_map_single(dev, phys, size, direction);
 }
 
 
 /**
- * ccio_unmap_page - Unmap an address range from the IOMMU.
+ * ccio_unmap_phys - Unmap an address range from the IOMMU.
  * @dev: The PCI device.
  * @iova: The start address of the DMA region.
  * @size: The length of the DMA region.
@@ -791,7 +792,7 @@ ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
  * @attrs: attributes
  */
 static void
-ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+ccio_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
                 enum dma_data_direction direction, unsigned long attrs)
 {
         struct ioc *ioc;
@@ -853,7 +854,8 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
 
         if (ret) {
                 memset(ret, 0, size);
-                *dma_handle = ccio_map_single(dev, ret, size, DMA_BIDIRECTIONAL);
+                *dma_handle = ccio_map_single(dev, virt_to_phys(ret), size,
+                                              DMA_BIDIRECTIONAL);
         }
 
         return ret;
@@ -873,7 +875,7 @@ static void
 ccio_free(struct device *dev, size_t size, void *cpu_addr,
                 dma_addr_t dma_handle, unsigned long attrs)
 {
-        ccio_unmap_page(dev, dma_handle, size, 0, 0);
+        ccio_unmap_phys(dev, dma_handle, size, 0, 0);
         free_pages((unsigned long)cpu_addr, get_order(size));
 }
 
@@ -920,7 +922,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
         /* Fast path single entry scatterlists. */
         if (nents == 1) {
                 sg_dma_address(sglist) = ccio_map_single(dev,
-                                sg_virt(sglist), sglist->length,
+                                sg_phys(sglist), sglist->length,
                                 direction);
                 sg_dma_len(sglist) = sglist->length;
                 return 1;
@@ -1004,7 +1006,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 #ifdef CCIO_COLLECT_STATS
                 ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
 #endif
-                ccio_unmap_page(dev, sg_dma_address(sglist),
+                ccio_unmap_phys(dev, sg_dma_address(sglist),
                                 sg_dma_len(sglist), direction, 0);
                 ++sglist;
                 nents--;
@@ -1017,8 +1019,8 @@ static const struct dma_map_ops ccio_ops = {
         .dma_supported =        ccio_dma_supported,
         .alloc =                ccio_alloc,
         .free =                 ccio_free,
-        .map_page =             ccio_map_page,
-        .unmap_page =           ccio_unmap_page,
+        .map_phys =             ccio_map_phys,
+        .unmap_phys =           ccio_unmap_phys,
         .map_sg =               ccio_map_sg,
         .unmap_sg =             ccio_unmap_sg,
         .get_sgtable =          dma_common_get_sgtable,
@@ -1072,7 +1074,7 @@ static int ccio_proc_info(struct seq_file *m, void *p)
                         ioc->msingle_calls, ioc->msingle_pages,
                         (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
 
-        /* KLUGE - unmap_sg calls unmap_page for each mapped page */
+        /* KLUGE - unmap_sg calls unmap_phys for each mapped page */
         min = ioc->usingle_calls - ioc->usg_calls;
         max = ioc->usingle_pages - ioc->usg_pages;
         seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -14,7 +14,7 @@
 static inline unsigned int
 iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
                 unsigned long hint,
-                void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
+                void (*iommu_io_pdir_entry)(__le64 *, space_t, phys_addr_t,
                                             unsigned long))
 {
         struct scatterlist *dma_sg = startsg;  /* pointer to current DMA */
@@ -28,7 +28,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
         dma_sg--;
 
         while (nents-- > 0) {
-                unsigned long vaddr;
+                phys_addr_t paddr;
                 long size;
 
                 DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
@@ -67,7 +67,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
 
                 BUG_ON(pdirp == NULL);
 
-                vaddr = (unsigned long)sg_virt(startsg);
+                paddr = sg_phys(startsg);
                 sg_dma_len(dma_sg) += startsg->length;
                 size = startsg->length + dma_offset;
                 dma_offset = 0;
@@ -76,8 +76,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
 #endif
                 do {
                         iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
-                                            vaddr, hint);
-                        vaddr += IOVP_SIZE;
+                                            paddr, hint);
+                        paddr += IOVP_SIZE;
                         size -= IOVP_SIZE;
                         pdirp++;
                 } while(unlikely(size > 0));
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -532,7 +532,7 @@ typedef unsigned long space_t;
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr: pointer to IO PDIR entry
 * @sid: process Space ID - currently only support KERNEL_SPACE
-* @vba: Virtual CPU address of buffer to map
+* @pba: Physical address of buffer to map
 * @hint: DMA hint set to use for this mapping
 *
 * SBA Mapping Routine
@@ -569,20 +569,17 @@ typedef unsigned long space_t;
 */
 
 static void
-sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
+sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
                   unsigned long hint)
 {
-        u64 pa; /* physical address */
         register unsigned ci; /* coherent index */
 
-        pa = lpa(vba);
-        pa &= IOVP_MASK;
-
-        asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
-        pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
+        asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba)));
+        pba &= IOVP_MASK;
+        pba |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
 
-        pa |= SBA_PDIR_VALID_BIT;       /* set "valid" bit */
-        *pdir_ptr = cpu_to_le64(pa);    /* swap and store into I/O Pdir */
+        pba |= SBA_PDIR_VALID_BIT;      /* set "valid" bit */
+        *pdir_ptr = cpu_to_le64(pba);   /* swap and store into I/O Pdir */
 
         /*
          * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
@@ -707,7 +704,7 @@ static int sba_dma_supported( struct device *dev, u64 mask)
  * See Documentation/core-api/dma-api-howto.rst
  */
 static dma_addr_t
-sba_map_single(struct device *dev, void *addr, size_t size,
+sba_map_single(struct device *dev, phys_addr_t addr, size_t size,
                enum dma_data_direction direction)
 {
         struct ioc *ioc;
@@ -722,7 +719,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
                 return DMA_MAPPING_ERROR;
 
         /* save offset bits */
-        offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
+        offset = offset_in_page(addr);
 
         /* round up to nearest IOVP_SIZE */
         size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
@@ -739,13 +736,13 @@ sba_map_single(struct device *dev, void *addr, size_t size,
         pide = sba_alloc_range(ioc, dev, size);
         iovp = (dma_addr_t) pide << IOVP_SHIFT;
 
-        DBG_RUN("%s() 0x%p -> 0x%lx\n",
-                __func__, addr, (long) iovp | offset);
+        DBG_RUN("%s() 0x%pa -> 0x%lx\n",
+                __func__, &addr, (long) iovp | offset);
 
         pdir_start = &(ioc->pdir_base[pide]);
 
         while (size > 0) {
-                sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
+                sba_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, 0);
 
                 DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
                         pdir_start,
@@ -778,17 +775,18 @@ sba_map_single(struct device *dev, void *addr, size_t size,
 
 
 static dma_addr_t
-sba_map_page(struct device *dev, struct page *page, unsigned long offset,
-                size_t size, enum dma_data_direction direction,
-                unsigned long attrs)
+sba_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+                enum dma_data_direction direction, unsigned long attrs)
 {
-        return sba_map_single(dev, page_address(page) + offset, size,
-                        direction);
+        if (unlikely(attrs & DMA_ATTR_MMIO))
+                return DMA_MAPPING_ERROR;
+
+        return sba_map_single(dev, phys, size, direction);
 }
 
 
 /**
- * sba_unmap_page - unmap one IOVA and free resources
+ * sba_unmap_phys - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova: IOVA of driver buffer previously mapped.
  * @size: number of bytes mapped in driver buffer.
@@ -798,7 +796,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset,
  * See Documentation/core-api/dma-api-howto.rst
  */
 static void
-sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+sba_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
                enum dma_data_direction direction, unsigned long attrs)
 {
         struct ioc *ioc;
@@ -893,7 +891,7 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle
 
         if (ret) {
                 memset(ret, 0, size);
-                *dma_handle = sba_map_single(hwdev, ret, size, 0);
+                *dma_handle = sba_map_single(hwdev, virt_to_phys(ret), size, 0);
         }
 
         return ret;
@@ -914,7 +912,7 @@ static void
 sba_free(struct device *hwdev, size_t size, void *vaddr,
             dma_addr_t dma_handle, unsigned long attrs)
 {
-        sba_unmap_page(hwdev, dma_handle, size, 0, 0);
+        sba_unmap_phys(hwdev, dma_handle, size, 0, 0);
         free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -962,7 +960,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
         /* Fast path single entry scatterlists. */
         if (nents == 1) {
-                sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
+                sg_dma_address(sglist) = sba_map_single(dev, sg_phys(sglist),
                                                 sglist->length, direction);
                 sg_dma_len(sglist) = sglist->length;
                 return 1;
@@ -1061,7 +1059,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
         while (nents && sg_dma_len(sglist)) {
 
-                sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
+                sba_unmap_phys(dev, sg_dma_address(sglist), sg_dma_len(sglist),
                                 direction, 0);
 #ifdef SBA_COLLECT_STATS
                 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
@@ -1085,8 +1083,8 @@ static const struct dma_map_ops sba_ops = {
         .dma_supported =        sba_dma_supported,
         .alloc =                sba_alloc,
         .free =                 sba_free,
-        .map_page =             sba_map_page,
-        .unmap_page =           sba_unmap_page,
+        .map_phys =             sba_map_phys,
+        .unmap_phys =           sba_unmap_phys,
         .map_sg =               sba_map_sg,
         .unmap_sg =             sba_unmap_sg,
         .get_sgtable =          dma_common_get_sgtable,
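Callers are unaffected by this conversion: drivers keep using the generic DMA API, and the core now hands the bus IOMMU a physical address instead of a page pointer. An illustrative caller on top of these ops, assuming a device pointer dev and a kernel buffer buf of len bytes:

        dma_addr_t handle;

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;
        /* ... device performs DMA ... */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);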