nvme-pci: migrate to dma_map_phys instead of map_page

After the introduction of dma_map_phys(), there is no need to convert
a physical address to a struct page just to map it. So let's use
dma_map_phys() directly.
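
For illustration only (dev, paddr, len and dir below are placeholder
locals, not code from this patch), the mapping call changes from:

	addr = dma_map_page(dev, phys_to_page(paddr),
			offset_in_page(paddr), len, dir);

to:

	addr = dma_map_phys(dev, paddr, len, dir, 0);

where the extra argument carries DMA attributes (0 means none), and
dma_unmap_page() callers move to dma_unmap_phys() in the same way.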

Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 61d43b1731
parent 8e1bf774ab
Author:    Leon Romanovsky
Date:      2025-11-14 11:07:03 +02:00
Committed: Jens Axboe

 2 files changed, 15 insertions(+), 14 deletions(-)

block/blk-mq-dma.c

@@ -92,8 +92,8 @@ static bool blk_dma_map_bus(struct blk_dma_iter *iter, struct phys_vec *vec)
 static bool blk_dma_map_direct(struct request *req, struct device *dma_dev,
 		struct blk_dma_iter *iter, struct phys_vec *vec)
 {
-	iter->addr = dma_map_page(dma_dev, phys_to_page(vec->paddr),
-			offset_in_page(vec->paddr), vec->len, rq_dma_dir(req));
+	iter->addr = dma_map_phys(dma_dev, vec->paddr, vec->len,
+			rq_dma_dir(req), 0);
 	if (dma_mapping_error(dma_dev, iter->addr)) {
 		iter->status = BLK_STS_RESOURCE;
 		return false;

drivers/nvme/host/pci.c

@@ -698,20 +698,20 @@ static void nvme_free_descriptors(struct request *req)
 	}
 }
 
-static void nvme_free_prps(struct request *req)
+static void nvme_free_prps(struct request *req, unsigned int attrs)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 	unsigned int i;
 
 	for (i = 0; i < iod->nr_dma_vecs; i++)
-		dma_unmap_page(nvmeq->dev->dev, iod->dma_vecs[i].addr,
-				iod->dma_vecs[i].len, rq_dma_dir(req));
+		dma_unmap_phys(nvmeq->dev->dev, iod->dma_vecs[i].addr,
+				iod->dma_vecs[i].len, rq_dma_dir(req), attrs);
 	mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool);
 }
 
 static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
-		struct nvme_sgl_desc *sg_list)
+		struct nvme_sgl_desc *sg_list, unsigned int attrs)
 {
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 	enum dma_data_direction dir = rq_dma_dir(req);
@@ -720,13 +720,14 @@ static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
 	unsigned int i;
 
 	if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
-		dma_unmap_page(dma_dev, le64_to_cpu(sge->addr), len, dir);
+		dma_unmap_phys(dma_dev, le64_to_cpu(sge->addr), len, dir,
+				attrs);
 		return;
 	}
 
 	for (i = 0; i < len / sizeof(*sg_list); i++)
-		dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr),
-				le32_to_cpu(sg_list[i].length), dir);
+		dma_unmap_phys(dma_dev, le64_to_cpu(sg_list[i].addr),
+				le32_to_cpu(sg_list[i].length), dir, attrs);
 }
 
 static void nvme_unmap_metadata(struct request *req)
@@ -747,10 +748,10 @@ static void nvme_unmap_metadata(struct request *req)
 	if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state,
 					iod->meta_total_len)) {
 		if (nvme_pci_cmd_use_meta_sgl(&iod->cmd))
-			nvme_free_sgls(req, sge, &sge[1]);
+			nvme_free_sgls(req, sge, &sge[1], 0);
 		else
-			dma_unmap_page(dma_dev, iod->meta_dma,
-					iod->meta_total_len, dir);
+			dma_unmap_phys(dma_dev, iod->meta_dma,
+					iod->meta_total_len, dir, 0);
 	}
 
 	if (iod->meta_descriptor)
@@ -775,9 +776,9 @@ static void nvme_unmap_data(struct request *req)
 	if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
 		if (nvme_pci_cmd_use_sgl(&iod->cmd))
 			nvme_free_sgls(req, iod->descriptors[0],
-					&iod->cmd.common.dptr.sgl);
+					&iod->cmd.common.dptr.sgl, 0);
 		else
-			nvme_free_prps(req);
+			nvme_free_prps(req, 0);
 	}
 
 	if (iod->nr_descriptors)