remoteproc: Use of_reserved_mem_region_* functions for "memory-region"
Use the newly added of_reserved_mem_region_to_resource() and
of_reserved_mem_region_count() functions to handle "memory-region"
properties.

The error handling is a bit different in some cases. Often "memory-region"
is optional, so a failed lookup is not an error, yet an error from
of_reserved_mem_lookup() is treated as one. That distinction is not really
important: either the region is available and usable or it is not. So now
only the return value of of_reserved_mem_region_to_resource() is checked
for an error.

Acked-by: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
Tested-by: Peng Fan <peng.fan@nxp.com> # i.MX93-11x11-EVK for imx_rproc.c
Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be> # rcar
Tested-by: Beleswar Padhi <b-padhi@ti.com> # TI
Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
Link: https://lore.kernel.org/r/20251124182751.507624-1-robh@kernel.org
Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
commit 67a7bc7f03
parent 6f880e7bd1
committed by Mathieu Poirier
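For reference, a minimal sketch of the conversion pattern this commit applies across the drivers below. The driver context here (example_map_regions() and its variables) is hypothetical and not taken from any of the touched files; only the two of_reserved_mem_region_*() calls are the real API being adopted.

/*
 * Sketch only: the of_reserved_mem_region_count() /
 * of_reserved_mem_region_to_resource() pattern that replaces the older
 * of_parse_phandle() + of_reserved_mem_lookup() sequence. Function and
 * variable names are illustrative.
 */
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>

static int example_map_regions(struct device *dev)
{
        struct device_node *np = dev->of_node;
        int count, i;

        /* "memory-region" is optional: having no regions is not an error */
        count = of_reserved_mem_region_count(np);
        if (count <= 0)
                return 0;

        for (i = 0; i < count; i++) {
                struct resource res;
                int ret;

                /* Looks up entry i and fills a struct resource directly */
                ret = of_reserved_mem_region_to_resource(np, i, &res);
                if (ret)
                        return ret;

                dev_dbg(dev, "reserved region %d: %pR\n", i, &res);
        }

        return 0;
}

Compared with the iterator-based loops removed below, there is no struct device_node reference left to drop, which is why the of_node_put() calls disappear in every driver.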
drivers/remoteproc/imx_dsp_rproc.c

@@ -658,11 +658,9 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
 	struct rproc *rproc = priv->rproc;
 	struct device *dev = rproc->dev.parent;
 	struct device_node *np = dev->of_node;
-	struct of_phandle_iterator it;
 	struct rproc_mem_entry *mem;
-	struct reserved_mem *rmem;
 	void __iomem *cpu_addr;
-	int a;
+	int a, i = 0;
 	u64 da;
 
 	/* Remap required addresses */
@@ -693,49 +691,40 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
 		rproc_add_carveout(rproc, mem);
 	}
 
-	of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
-	while (of_phandle_iterator_next(&it) == 0) {
+	while (1) {
+		int err;
+		struct resource res;
+
+		err = of_reserved_mem_region_to_resource(np, i++, &res);
+		if (err)
+			return 0;
+
 		/*
 		 * Ignore the first memory region which will be used vdev buffer.
 		 * No need to do extra handlings, rproc_add_virtio_dev will handle it.
 		 */
-		if (!strcmp(it.node->name, "vdev0buffer"))
+		if (strstarts(res.name, "vdev0buffer"))
 			continue;
 
-		rmem = of_reserved_mem_lookup(it.node);
-		if (!rmem) {
-			of_node_put(it.node);
-			dev_err(dev, "unable to acquire memory-region\n");
+		if (imx_dsp_rproc_sys_to_da(priv, res.start, resource_size(&res), &da))
 			return -EINVAL;
-		}
-
-		if (imx_dsp_rproc_sys_to_da(priv, rmem->base, rmem->size, &da)) {
-			of_node_put(it.node);
-			return -EINVAL;
-		}
 
-		cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+		cpu_addr = devm_ioremap_resource_wc(dev, &res);
 		if (!cpu_addr) {
-			of_node_put(it.node);
-			dev_err(dev, "failed to map memory %p\n", &rmem->base);
+			dev_err(dev, "failed to map memory %pR\n", &res);
 			return -ENOMEM;
 		}
 
 		/* Register memory region */
-		mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)rmem->base,
-					   rmem->size, da, NULL, NULL, it.node->name);
-
-		if (mem) {
-			rproc_coredump_add_segment(rproc, da, rmem->size);
-		} else {
-			of_node_put(it.node);
+		mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)res.start,
+					   resource_size(&res), da, NULL, NULL,
+					   "%.*s", strchrnul(res.name, '@') - res.name, res.name);
+		if (!mem)
 			return -ENOMEM;
-		}
 
+		rproc_coredump_add_segment(rproc, da, resource_size(&res));
 		rproc_add_carveout(rproc, mem);
 	}
 
 	return 0;
 }
 
 /* Prepare function for rproc_ops */
drivers/remoteproc/imx_rproc.c

@@ -490,50 +490,44 @@ static int imx_rproc_prepare(struct rproc *rproc)
 {
 	struct imx_rproc *priv = rproc->priv;
 	struct device_node *np = priv->dev->of_node;
-	struct of_phandle_iterator it;
 	struct rproc_mem_entry *mem;
-	struct reserved_mem *rmem;
+	int i = 0;
 	u32 da;
 
 	/* Register associated reserved memory regions */
-	of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
-	while (of_phandle_iterator_next(&it) == 0) {
+	while (1) {
+		int err;
+		struct resource res;
+
+		err = of_reserved_mem_region_to_resource(np, i++, &res);
+		if (err)
+			return 0;
+
 		/*
 		 * Ignore the first memory region which will be used vdev buffer.
 		 * No need to do extra handlings, rproc_add_virtio_dev will handle it.
 		 */
-		if (!strcmp(it.node->name, "vdev0buffer"))
+		if (strstarts(res.name, "vdev0buffer"))
 			continue;
 
-		if (!strcmp(it.node->name, "rsc-table"))
+		if (strstarts(res.name, "rsc-table"))
 			continue;
 
-		rmem = of_reserved_mem_lookup(it.node);
-		if (!rmem) {
-			of_node_put(it.node);
-			dev_err(priv->dev, "unable to acquire memory-region\n");
-			return -EINVAL;
-		}
-
 		/* No need to translate pa to da, i.MX use same map */
-		da = rmem->base;
+		da = res.start;
 
 		/* Register memory region */
-		mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)rmem->base, rmem->size, da,
+		mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)res.start,
+					   resource_size(&res), da,
 					   imx_rproc_mem_alloc, imx_rproc_mem_release,
-					   it.node->name);
-
-		if (mem) {
-			rproc_coredump_add_segment(rproc, da, rmem->size);
-		} else {
-			of_node_put(it.node);
+					   "%.*s", strchrnul(res.name, '@') - res.name,
+					   res.name);
+		if (!mem)
 			return -ENOMEM;
-		}
 
+		rproc_coredump_add_segment(rproc, da, resource_size(&res));
 		rproc_add_carveout(rproc, mem);
 	}
 
 	return 0;
 }
 
 static int imx_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
@@ -671,47 +665,37 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
 	}
 
 	/* memory-region is optional property */
-	nph = of_count_phandle_with_args(np, "memory-region", NULL);
+	nph = of_reserved_mem_region_count(np);
 	if (nph <= 0)
 		return 0;
 
 	/* remap optional addresses */
 	for (a = 0; a < nph; a++) {
-		struct device_node *node;
 		struct resource res;
 
-		node = of_parse_phandle(np, "memory-region", a);
-		if (!node)
-			continue;
-		/* Not map vdevbuffer, vdevring region */
-		if (!strncmp(node->name, "vdev", strlen("vdev"))) {
-			of_node_put(node);
-			continue;
-		}
-		err = of_address_to_resource(node, 0, &res);
+		err = of_reserved_mem_region_to_resource(np, a, &res);
 		if (err) {
 			dev_err(dev, "unable to resolve memory region\n");
-			of_node_put(node);
 			return err;
 		}
 
-		if (b >= IMX_RPROC_MEM_MAX) {
-			of_node_put(node);
+		/* Not map vdevbuffer, vdevring region */
+		if (strstarts(res.name, "vdev"))
+			continue;
+
+		if (b >= IMX_RPROC_MEM_MAX)
 			break;
-		}
 
-		/* Not use resource version, because we might share region */
-		priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res));
+		priv->mem[b].cpu_addr = devm_ioremap_resource_wc(&pdev->dev, &res);
 		if (!priv->mem[b].cpu_addr) {
 			dev_err(dev, "failed to remap %pr\n", &res);
-			of_node_put(node);
 			return -ENOMEM;
 		}
 		priv->mem[b].sys_addr = res.start;
 		priv->mem[b].size = resource_size(&res);
-		if (!strcmp(node->name, "rsc-table"))
+		if (!strcmp(res.name, "rsc-table"))
 			priv->rsc_table = priv->mem[b].cpu_addr;
-		of_node_put(node);
 		b++;
 	}
 
drivers/remoteproc/rcar_rproc.c

@@ -52,46 +52,36 @@ static int rcar_rproc_prepare(struct rproc *rproc)
 {
 	struct device *dev = rproc->dev.parent;
 	struct device_node *np = dev->of_node;
-	struct of_phandle_iterator it;
 	struct rproc_mem_entry *mem;
-	struct reserved_mem *rmem;
+	int i = 0;
 	u32 da;
 
 	/* Register associated reserved memory regions */
-	of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
-	while (of_phandle_iterator_next(&it) == 0) {
-		rmem = of_reserved_mem_lookup(it.node);
-		if (!rmem) {
-			of_node_put(it.node);
-			dev_err(&rproc->dev,
-				"unable to acquire memory-region\n");
-			return -EINVAL;
-		}
+	while (1) {
+		struct resource res;
+		int ret;
+
+		ret = of_reserved_mem_region_to_resource(np, i++, &res);
+		if (ret)
+			return 0;
 
-		if (rmem->base > U32_MAX) {
-			of_node_put(it.node);
+		if (res.start > U32_MAX)
 			return -EINVAL;
-		}
 
 		/* No need to translate pa to da, R-Car use same map */
-		da = rmem->base;
+		da = res.start;
 		mem = rproc_mem_entry_init(dev, NULL,
-					   rmem->base,
-					   rmem->size, da,
+					   res.start,
+					   resource_size(&res), da,
 					   rcar_rproc_mem_alloc,
 					   rcar_rproc_mem_release,
-					   it.node->name);
+					   res.name);
 
-		if (!mem) {
-			of_node_put(it.node);
+		if (!mem)
 			return -ENOMEM;
-		}
 
 		rproc_add_carveout(rproc, mem);
 	}
 
 	return 0;
 }
 
 static int rcar_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
drivers/remoteproc/st_remoteproc.c

@@ -120,40 +120,39 @@ static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
 	struct device *dev = rproc->dev.parent;
 	struct device_node *np = dev->of_node;
 	struct rproc_mem_entry *mem;
-	struct reserved_mem *rmem;
-	struct of_phandle_iterator it;
-	int index = 0;
+	int entries;
 
-	of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
-	while (of_phandle_iterator_next(&it) == 0) {
-		rmem = of_reserved_mem_lookup(it.node);
-		if (!rmem) {
-			of_node_put(it.node);
-			dev_err(dev, "unable to acquire memory-region\n");
-			return -EINVAL;
-		}
+	entries = of_reserved_mem_region_count(np);
+
+	for (int index = 0; index < entries; index++) {
+		struct resource res;
+		int ret;
+
+		ret = of_reserved_mem_region_to_resource(np, index, &res);
+		if (ret)
+			return ret;
 
 		/* No need to map vdev buffer */
-		if (strcmp(it.node->name, "vdev0buffer")) {
+		if (!strstarts(res.name, "vdev0buffer")) {
 			/* Register memory region */
 			mem = rproc_mem_entry_init(dev, NULL,
-						   (dma_addr_t)rmem->base,
-						   rmem->size, rmem->base,
+						   (dma_addr_t)res.start,
+						   resource_size(&res), res.start,
 						   st_rproc_mem_alloc,
 						   st_rproc_mem_release,
-						   it.node->name);
+						   "%.*s",
+						   strchrnul(res.name, '@') - res.name,
+						   res.name);
 		} else {
 			/* Register reserved memory for vdev buffer allocation */
 			mem = rproc_of_resm_mem_entry_init(dev, index,
-							   rmem->size,
-							   rmem->base,
-							   it.node->name);
+							   resource_size(&res),
+							   res.start,
+							   "vdev0buffer");
 		}
 
-		if (!mem) {
-			of_node_put(it.node);
+		if (!mem)
 			return -ENOMEM;
-		}
 
 		rproc_add_carveout(rproc, mem);
-		index++;
drivers/remoteproc/stm32_rproc.c

@@ -213,60 +213,52 @@ static int stm32_rproc_prepare(struct rproc *rproc)
 {
 	struct device *dev = rproc->dev.parent;
 	struct device_node *np = dev->of_node;
-	struct of_phandle_iterator it;
 	struct rproc_mem_entry *mem;
-	struct reserved_mem *rmem;
 	u64 da;
-	int index = 0;
+	int index = 0, mr = 0;
 
 	/* Register associated reserved memory regions */
-	of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
-	while (of_phandle_iterator_next(&it) == 0) {
-		rmem = of_reserved_mem_lookup(it.node);
-		if (!rmem) {
-			of_node_put(it.node);
-			dev_err(dev, "unable to acquire memory-region\n");
-			return -EINVAL;
-		}
+	while (1) {
+		struct resource res;
+		int ret;
+
+		ret = of_reserved_mem_region_to_resource(np, mr++, &res);
+		if (ret)
+			return 0;
 
-		if (stm32_rproc_pa_to_da(rproc, rmem->base, &da) < 0) {
-			of_node_put(it.node);
-			dev_err(dev, "memory region not valid %pa\n",
-				&rmem->base);
+		if (stm32_rproc_pa_to_da(rproc, res.start, &da) < 0) {
+			dev_err(dev, "memory region not valid %pR\n", &res);
 			return -EINVAL;
 		}
 
 		/* No need to map vdev buffer */
-		if (strcmp(it.node->name, "vdev0buffer")) {
+		if (!strstarts(res.name, "vdev0buffer")) {
 			/* Register memory region */
 			mem = rproc_mem_entry_init(dev, NULL,
-						   (dma_addr_t)rmem->base,
-						   rmem->size, da,
+						   (dma_addr_t)res.start,
+						   resource_size(&res), da,
 						   stm32_rproc_mem_alloc,
 						   stm32_rproc_mem_release,
-						   it.node->name);
-
+						   "%.*s", strchrnul(res.name, '@') - res.name,
+						   res.name);
 			if (mem)
 				rproc_coredump_add_segment(rproc, da,
-							   rmem->size);
+							   resource_size(&res));
 		} else {
 			/* Register reserved memory for vdev buffer alloc */
 			mem = rproc_of_resm_mem_entry_init(dev, index,
-							   rmem->size,
-							   rmem->base,
-							   it.node->name);
+							   resource_size(&res),
+							   res.start,
+							   "vdev0buffer");
 		}
 
 		if (!mem) {
-			of_node_put(it.node);
 			return -ENOMEM;
 		}
 
 		rproc_add_carveout(rproc, mem);
 		index++;
 	}
 
 	return 0;
 }
 
 static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
drivers/remoteproc/ti_k3_common.c

@@ -470,13 +470,10 @@ int k3_reserved_mem_init(struct k3_rproc *kproc)
 {
 	struct device *dev = kproc->dev;
 	struct device_node *np = dev->of_node;
-	struct device_node *rmem_np;
-	struct reserved_mem *rmem;
 	int num_rmems;
 	int ret, i;
 
-	num_rmems = of_property_count_elems_of_size(np, "memory-region",
-						    sizeof(phandle));
+	num_rmems = of_reserved_mem_region_count(np);
 	if (num_rmems < 0) {
 		dev_err(dev, "device does not reserved memory regions (%d)\n",
 			num_rmems);
@@ -505,23 +502,20 @@ int k3_reserved_mem_init(struct k3_rproc *kproc)
 
 	/* use remaining reserved memory regions for static carveouts */
 	for (i = 0; i < num_rmems; i++) {
-		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
-		if (!rmem_np)
-			return -EINVAL;
+		struct resource res;
 
-		rmem = of_reserved_mem_lookup(rmem_np);
-		of_node_put(rmem_np);
-		if (!rmem)
-			return -EINVAL;
+		ret = of_reserved_mem_region_to_resource(np, i + 1, &res);
+		if (ret)
+			return ret;
 
-		kproc->rmem[i].bus_addr = rmem->base;
+		kproc->rmem[i].bus_addr = res.start;
 		/* 64-bit address regions currently not supported */
-		kproc->rmem[i].dev_addr = (u32)rmem->base;
-		kproc->rmem[i].size = rmem->size;
-		kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+		kproc->rmem[i].dev_addr = (u32)res.start;
+		kproc->rmem[i].size = resource_size(&res);
+		kproc->rmem[i].cpu_addr = devm_ioremap_resource_wc(dev, &res);
 		if (!kproc->rmem[i].cpu_addr) {
-			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
-				i + 1, &rmem->base, &rmem->size);
+			dev_err(dev, "failed to map reserved memory#%d at %pR\n",
+				i + 1, &res);
 			return -ENOMEM;
 		}
 
drivers/remoteproc/xlnx_r5_remoteproc.c

@@ -492,53 +492,46 @@ static int add_mem_regions_carveout(struct rproc *rproc)
 {
 	struct rproc_mem_entry *rproc_mem;
 	struct zynqmp_r5_core *r5_core;
-	struct of_phandle_iterator it;
-	struct reserved_mem *rmem;
 	int i = 0;
 
 	r5_core = rproc->priv;
 
 	/* Register associated reserved memory regions */
-	of_phandle_iterator_init(&it, r5_core->np, "memory-region", NULL, 0);
-	while (of_phandle_iterator_next(&it) == 0) {
-		rmem = of_reserved_mem_lookup(it.node);
-		if (!rmem) {
-			of_node_put(it.node);
-			dev_err(&rproc->dev, "unable to acquire memory-region\n");
-			return -EINVAL;
-		}
+	while (1) {
+		int err;
+		struct resource res;
+
+		err = of_reserved_mem_region_to_resource(r5_core->np, i, &res);
+		if (err)
+			return 0;
 
-		if (!strcmp(it.node->name, "vdev0buffer")) {
+		if (strstarts(res.name, "vdev0buffer")) {
 			/* Init reserved memory for vdev buffer */
 			rproc_mem = rproc_of_resm_mem_entry_init(&rproc->dev, i,
-								 rmem->size,
-								 rmem->base,
-								 it.node->name);
+								 resource_size(&res),
+								 res.start,
+								 "vdev0buffer");
 		} else {
 			/* Register associated reserved memory regions */
 			rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
-							 (dma_addr_t)rmem->base,
-							 rmem->size, rmem->base,
+							 (dma_addr_t)res.start,
+							 resource_size(&res), res.start,
 							 zynqmp_r5_mem_region_map,
 							 zynqmp_r5_mem_region_unmap,
-							 it.node->name);
+							 "%.*s",
+							 strchrnul(res.name, '@') - res.name,
+							 res.name);
 		}
 
-		if (!rproc_mem) {
-			of_node_put(it.node);
+		if (!rproc_mem)
 			return -ENOMEM;
-		}
 
 		rproc_add_carveout(rproc, rproc_mem);
-		rproc_coredump_add_segment(rproc, rmem->base, rmem->size);
+		rproc_coredump_add_segment(rproc, res.start, resource_size(&res));
 
-		dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
-			it.node->name, rmem->base, rmem->size);
+		dev_dbg(&rproc->dev, "reserved mem carveout %pR\n", &res);
 		i++;
 	}
 
 	return 0;
 }
 
 static int add_sram_carveouts(struct rproc *rproc)
@@ -808,7 +801,6 @@ static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
 	struct device *dev = r5_core->dev;
 	struct rsc_tbl_data *rsc_data_va;
 	struct resource res_mem;
-	struct device_node *np;
 	int ret;
 
 	/*
@@ -818,14 +810,7 @@ static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
 	 * contains that data structure which holds resource table address, size
 	 * and some magic number to validate correct resource table entry.
 	 */
-	np = of_parse_phandle(r5_core->np, "memory-region", 0);
-	if (!np) {
-		dev_err(dev, "failed to get memory region dev node\n");
-		return -EINVAL;
-	}
-
-	ret = of_address_to_resource(np, 0, &res_mem);
-	of_node_put(np);
+	ret = of_reserved_mem_region_to_resource(r5_core->np, 0, &res_mem);
 	if (ret) {
 		dev_err(dev, "failed to get memory-region resource addr\n");
 		return -EINVAL;