drm/amdgpu: Add logic for VF ipd and VF bios to init from dynamic crit_region offsets

1. Added VF logic in amdgpu_virt to initialize IP discovery using the offsets from the dynamic (v2) critical regions.
2. Added VF logic in amdgpu_virt to initialize the VBIOS image using the offsets from the dynamic (v2) critical regions.

Signed-off-by: Ellen Pan <yunru.pan@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit b4a8fcc782 (parent 13ccaa8443)
Author: Ellen Pan, 2025-10-07 11:12:39 -05:00
Committed by: Alex Deucher

4 changed files with 85 additions and 14 deletions
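
For orientation before the per-file diffs, the sketch below condenses the new VF path into one illustrative function. The helper, flag, and table-ID names are taken from the diff; the wrapper function itself, its parameters, and its simplified error handling are invented for illustration and are not part of the patch.

/* Condensed illustration of the new VF flow (not the patched code itself):
 * with dynamic (v2) critical regions enabled, a VF reads both the VBIOS
 * image and the IP discovery (IPD) binary from host-provided offsets via
 * the new helper instead of the legacy fixed VRAM locations.
 */
static int vf_crit_region_init_sketch(struct amdgpu_device *adev,
				      uint8_t *vbios, uint64_t vbios_size,
				      uint8_t *ipd, uint64_t ipd_size)
{
	if (!(amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled))
		return -ENOENT; /* legacy fixed-offset paths are used instead */

	/* VBIOS image: done in amdgpu_read_bios_from_vram() */
	if (amdgpu_virt_get_dynamic_data_info(adev, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID,
					      vbios, &vbios_size))
		return -EINVAL;

	/* IP discovery binary: done in amdgpu_discovery_read_binary_from_mem() */
	if (amdgpu_virt_get_dynamic_data_info(adev, AMD_SRIOV_MSG_IPD_TABLE_ID,
					      ipd, &ipd_size))
		return -EINVAL;

	return 0;
}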

drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c

@@ -96,11 +96,12 @@ void amdgpu_bios_release(struct amdgpu_device *adev)
  * part of the system bios. On boot, the system bios puts a
  * copy of the igp rom at the start of vram if a discrete card is
  * present.
- * For SR-IOV, the vbios image is also put in VRAM in the VF.
+ * For SR-IOV, if dynamic critical region is not enabled,
+ * the vbios image is also put at the start of VRAM in the VF.
  */
 static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
 {
-	uint8_t __iomem *bios;
+	uint8_t __iomem *bios = NULL;
 	resource_size_t vram_base;
 	resource_size_t size = 256 * 1024; /* ??? */
@@ -114,18 +115,33 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
 	adev->bios = NULL;
 	vram_base = pci_resource_start(adev->pdev, 0);
-	bios = ioremap_wc(vram_base, size);
-	if (!bios)
-		return false;
 
 	adev->bios = kmalloc(size, GFP_KERNEL);
-	if (!adev->bios) {
-		iounmap(bios);
+	if (!adev->bios)
 		return false;
-	}
+
+	/* For SRIOV with dynamic critical region enabled,
+	 * the vbios image is put at a dynamic offset of VRAM in the VF.
+	 * If dynamic critical region is disabled, follow the existing logic as on bare metal.
+	 */
+	if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+		if (amdgpu_virt_get_dynamic_data_info(adev,
+				AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, adev->bios, (uint64_t *)&size)) {
+			amdgpu_bios_release(adev);
+			return false;
+		}
+	} else {
+		bios = ioremap_wc(vram_base, size);
+		if (!bios) {
+			amdgpu_bios_release(adev);
+			return false;
+		}
+		memcpy_fromio(adev->bios, bios, size);
+		iounmap(bios);
+	}
+
 	adev->bios_size = size;
-	memcpy_fromio(adev->bios, bios, size);
-	iounmap(bios);
 
 	if (!check_atom_bios(adev, size)) {
 		amdgpu_bios_release(adev);

drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c

@@ -304,10 +304,26 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
 	 * then it is not required to be reserved.
 	 */
 	if (sz_valid) {
-		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
-
-		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
-					  adev->discovery.size, false);
-		adev->discovery.reserve_tmr = true;
+		if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+			/* For SRIOV VFs with dynamic critical region enabled,
+			 * get the IPD binary via the call below.
+			 * If dynamic critical region is disabled, fall through to the normal sequence.
+			 */
+			if (amdgpu_virt_get_dynamic_data_info(adev,
+					AMD_SRIOV_MSG_IPD_TABLE_ID, binary,
+					(uint64_t *)&adev->discovery.size)) {
+				dev_err(adev->dev,
+					"failed to read discovery info from dynamic critical region.");
+				ret = -EINVAL;
+				goto exit;
+			}
+		} else {
+			uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+
+			amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+						  adev->discovery.size, false);
+			adev->discovery.reserve_tmr = true;
+		}
 	} else {
 		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
 	}
@@ -316,7 +332,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
 		dev_err(adev->dev,
 			"failed to read discovery info from memory, vram size read: %llx",
 			vram_size);
-
+exit:
 	return ret;
 }

drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c

@@ -1008,6 +1008,14 @@ int amdgpu_virt_init_critical_region(struct amdgpu_device *adev)
 			init_data_hdr->bad_page_size_in_kb;
 	}
 
+	/* Validation for critical region info */
+	if (adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb > DISCOVERY_TMR_SIZE) {
+		dev_err(adev->dev, "Invalid IP discovery size: 0x%x\n",
+			adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb);
+		r = -EINVAL;
+		goto out;
+	}
+
 	/* reserved memory starts from crit region base offset with the size of 5MB */
 	adev->mman.fw_vram_usage_start_offset = adev->virt.crit_regn.offset;
 	adev->mman.fw_vram_usage_size = adev->virt.crit_regn.size_kb << 10;
@@ -1026,6 +1034,35 @@ out:
 	return r;
 }
 
+int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
+				      int data_id, uint8_t *binary, uint64_t *size)
+{
+	uint32_t data_offset = 0;
+	uint32_t data_size = 0;
+	enum amd_sriov_msg_table_id_enum data_table_id = data_id;
+
+	if (data_table_id >= AMD_SRIOV_MSG_MAX_TABLE_ID)
+		return -EINVAL;
+
+	data_offset = adev->virt.crit_regn_tbl[data_table_id].offset;
+	data_size = adev->virt.crit_regn_tbl[data_table_id].size_kb << 10;
+
+	/* Validate input params */
+	if (!binary || !size || *size < (uint64_t)data_size)
+		return -EINVAL;
+
+	/* Proceed to copy the dynamic content */
+	amdgpu_device_vram_access(adev,
+			(uint64_t)data_offset, (uint32_t *)binary, data_size, false);
+	*size = (uint64_t)data_size;
+
+	dev_dbg(adev->dev,
+		"Got %s info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
+		amdgpu_virt_dynamic_crit_table_name[data_id], data_offset, data_size);
+
+	return 0;
+}
+
 void amdgpu_virt_init(struct amdgpu_device *adev)
 {
 	bool is_sriov = false;

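For reference, a hypothetical caller of the new helper (not part of the patch), showing the size-in/size-out contract visible above: the caller sizes its buffer from the critical-region table (size_kb is in KB, converted to bytes) and receives the number of bytes actually copied back through *size. The function name and error handling here are illustrative; only standard kernel allocation helpers (kvzalloc/kvfree) and symbols already present in the diff are assumed.

/* Hypothetical consumer of a table that lives in the dynamic critical region. */
static int example_read_crit_table(struct amdgpu_device *adev, int table_id)
{
	uint64_t size;
	uint8_t *buf;
	int r;

	if (!amdgpu_sriov_vf(adev) || !adev->virt.is_dynamic_crit_regn_enabled)
		return -EOPNOTSUPP;
	if (table_id < 0 || table_id >= AMD_SRIOV_MSG_MAX_TABLE_ID)
		return -EINVAL;

	/* Table sizes are published in KB by the critical region table parsed
	 * in amdgpu_virt_init_critical_region(); convert to bytes.
	 */
	size = (uint64_t)adev->virt.crit_regn_tbl[table_id].size_kb << 10;
	buf = kvzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = amdgpu_virt_get_dynamic_data_info(adev, table_id, buf, &size);
	if (r)
		dev_err(adev->dev, "failed to read table %d from critical region\n",
			table_id);

	/* ... consume buf[0..size) here ... */
	kvfree(buf);
	return r;
}
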
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h

@@ -442,6 +442,8 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
 void amdgpu_virt_init(struct amdgpu_device *adev);
 int amdgpu_virt_init_critical_region(struct amdgpu_device *adev);
+int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
+				      int data_id, uint8_t *binary, uint64_t *size);
 bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
 int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);