drm/xe/pf: Switch VF migration GuC save/restore to struct migration data

In upcoming changes, the GuC VF migration data will be handled as part
of separate SAVE/RESTORE states in the VF control state machine.
Now that the data is decoupled from both guc_state debugfs and PAUSE
state, we can safely remove the struct xe_gt_sriov_state_snapshot and
modify the GuC save/restore functions to operate on struct
xe_sriov_migration_data.

Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patch.msgid.link/20251112132220.516975-16-michal.winiarski@intel.com
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
commit 642a30a946 (parent 6e03c1366a)
Author: Michał Winiarski
Date:   2025-11-12 14:22:11 +01:00
4 changed files with 92 additions and 242 deletions
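
A minimal sketch of how the reworked save path might be driven; the wrapper
function below is hypothetical, only xe_gt_sriov_pf_migration_guc_size() and
xe_gt_sriov_pf_migration_guc_save() come from this patch:

static int example_save_vf_guc_data(struct xe_gt *gt, unsigned int vfid)
{
        ssize_t size;

        /* Ask the GuC how much migration data the VF currently holds. */
        size = xe_gt_sriov_pf_migration_guc_size(gt, vfid);
        if (size < 0)
                return size;
        if (!size)
                return 0;       /* nothing to save for this VF */

        /*
         * Save the data; the packet is allocated internally and handed over
         * to the per-VF migration ring via
         * xe_gt_sriov_pf_migration_save_produce().
         */
        return xe_gt_sriov_pf_migration_guc_save(gt, vfid);
}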

drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c

@@ -29,6 +29,19 @@ static struct xe_gt_sriov_migration_data *pf_pick_gt_migration(struct xe_gt *gt,
return &gt->sriov.pf.vfs[vfid].migration;
}
static void pf_dump_mig_data(struct xe_gt *gt, unsigned int vfid,
struct xe_sriov_packet *data,
const char *what)
{
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
struct drm_printer p = xe_gt_dbg_printer(gt);
drm_printf(&p, "VF%u %s (%llu bytes)\n", vfid, what, data->hdr.size);
drm_print_hex_dump(&p, "mig_hdr: ", (void *)&data->hdr, sizeof(data->hdr));
drm_print_hex_dump(&p, "mig_data: ", data->vaddr, min(SZ_64, data->hdr.size));
}
}
/* Return: number of dwords saved/restored/required or a negative error code on failure */
static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
u64 addr, u32 ndwords)
@@ -48,7 +61,7 @@ static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
}
/* Return: size of the state in dwords or a negative error code on failure */
static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
static int pf_send_guc_query_vf_mig_data_size(struct xe_gt *gt, unsigned int vfid)
{
int ret;
@@ -57,8 +70,8 @@ static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
}
/* Return: number of state dwords saved or a negative error code on failure */
static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
void *dst, size_t size)
static int pf_send_guc_save_vf_mig_data(struct xe_gt *gt, unsigned int vfid,
void *dst, size_t size)
{
const int ndwords = size / sizeof(u32);
struct xe_guc *guc = &gt->uc.guc;
@@ -87,8 +100,8 @@ static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
}
/* Return: number of state dwords restored or a negative error code on failure */
static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
const void *src, size_t size)
static int pf_send_guc_restore_vf_mig_data(struct xe_gt *gt, unsigned int vfid,
const void *src, size_t size)
{
const int ndwords = size / sizeof(u32);
struct xe_guc *guc = &gt->uc.guc;
@@ -116,120 +129,66 @@ static bool pf_migration_supported(struct xe_gt *gt)
return xe_sriov_pf_migration_supported(gt_to_xe(gt));
}
static struct mutex *pf_migration_mutex(struct xe_gt *gt)
static int pf_save_vf_guc_mig_data(struct xe_gt *gt, unsigned int vfid)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
return &gt->sriov.pf.migration.snapshot_lock;
}
static struct xe_gt_sriov_state_snapshot *pf_pick_vf_snapshot(struct xe_gt *gt,
unsigned int vfid)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
lockdep_assert_held(pf_migration_mutex(gt));
return &gt->sriov.pf.vfs[vfid].snapshot;
}
static unsigned int pf_snapshot_index(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
{
return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs;
}
static void pf_free_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
{
struct xe_device *xe = gt_to_xe(gt);
drmm_kfree(&xe->drm, snapshot->guc.buff);
snapshot->guc.buff = NULL;
snapshot->guc.size = 0;
}
static int pf_alloc_guc_state(struct xe_gt *gt,
struct xe_gt_sriov_state_snapshot *snapshot,
size_t size)
{
struct xe_device *xe = gt_to_xe(gt);
void *p;
pf_free_guc_state(gt, snapshot);
if (!size)
return -ENODATA;
if (size % sizeof(u32))
return -EINVAL;
if (size > SZ_2M)
return -EFBIG;
p = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
if (!p)
return -ENOMEM;
snapshot->guc.buff = p;
snapshot->guc.size = size;
return 0;
}
static void pf_dump_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
{
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
unsigned int vfid __maybe_unused = pf_snapshot_index(gt, snapshot);
xe_gt_sriov_dbg_verbose(gt, "VF%u GuC state is %zu dwords:\n",
vfid, snapshot->guc.size / sizeof(u32));
print_hex_dump_bytes("state: ", DUMP_PREFIX_OFFSET,
snapshot->guc.buff, min(SZ_64, snapshot->guc.size));
}
}
static int pf_save_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
struct xe_sriov_packet *data;
size_t size;
int ret;
ret = pf_send_guc_query_vf_state_size(gt, vfid);
ret = pf_send_guc_query_vf_mig_data_size(gt, vfid);
if (ret < 0)
goto fail;
size = ret * sizeof(u32);
xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size);
ret = pf_alloc_guc_state(gt, snapshot, size);
if (ret < 0)
data = xe_sriov_packet_alloc(gt_to_xe(gt));
if (!data) {
ret = -ENOMEM;
goto fail;
}
ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size);
ret = xe_sriov_packet_init(data, gt->tile->id, gt->info.id,
XE_SRIOV_PACKET_TYPE_GUC, 0, size);
if (ret)
goto fail_free;
ret = pf_send_guc_save_vf_mig_data(gt, vfid, data->vaddr, size);
if (ret < 0)
goto fail;
goto fail_free;
size = ret * sizeof(u32);
xe_gt_assert(gt, size);
xe_gt_assert(gt, size <= snapshot->guc.size);
snapshot->guc.size = size;
xe_gt_assert(gt, size <= data->hdr.size);
data->hdr.size = size;
data->remaining = size;
pf_dump_mig_data(gt, vfid, data, "GuC data save");
ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data);
if (ret)
goto fail_free;
pf_dump_guc_state(gt, snapshot);
return 0;
fail_free:
xe_sriov_packet_free(data);
fail:
xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret));
pf_free_guc_state(gt, snapshot);
xe_gt_sriov_err(gt, "Failed to save VF%u GuC data (%pe)\n",
vfid, ERR_PTR(ret));
return ret;
}
/**
* xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot.
* xe_gt_sriov_pf_migration_guc_size() - Get the size of VF GuC migration data.
* @gt: the &xe_gt
* @vfid: the VF identifier
*
* This function is for PF only.
*
* Return: 0 on success or a negative error code on failure.
* Return: size in bytes or a negative error code on failure.
*/
int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid)
ssize_t xe_gt_sriov_pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid)
{
int err;
ssize_t size;
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
xe_gt_assert(gt, vfid != PFID);
@@ -238,48 +197,68 @@ int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid)
if (!pf_migration_supported(gt))
return -ENOPKG;
mutex_lock(pf_migration_mutex(gt));
err = pf_save_vf_guc_state(gt, vfid);
mutex_unlock(pf_migration_mutex(gt));
size = pf_send_guc_query_vf_mig_data_size(gt, vfid);
if (size >= 0)
size *= sizeof(u32);
return err;
return size;
}
static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
/**
* xe_gt_sriov_pf_migration_guc_save() - Save VF GuC migration data.
* @gt: the &xe_gt
* @vfid: the VF identifier
*
* This function is for PF only.
*
* Return: 0 on success or a negative error code on failure.
*/
int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
xe_gt_assert(gt, vfid != PFID);
xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
if (!pf_migration_supported(gt))
return -ENOPKG;
return pf_save_vf_guc_mig_data(gt, vfid);
}
static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid,
struct xe_sriov_packet *data)
{
struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
int ret;
if (!snapshot->guc.size)
return -ENODATA;
xe_gt_assert(gt, data->hdr.size);
xe_gt_sriov_dbg_verbose(gt, "restoring %zu dwords of VF%u GuC state\n",
snapshot->guc.size / sizeof(u32), vfid);
ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size);
pf_dump_mig_data(gt, vfid, data, "GuC data restore");
ret = pf_send_guc_restore_vf_mig_data(gt, vfid, data->vaddr, data->hdr.size);
if (ret < 0)
goto fail;
xe_gt_sriov_dbg_verbose(gt, "restored %d dwords of VF%u GuC state\n", ret, vfid);
return 0;
fail:
xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret));
xe_gt_sriov_err(gt, "Failed to restore VF%u GuC data (%pe)\n",
vfid, ERR_PTR(ret));
return ret;
}
/**
* xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state.
* xe_gt_sriov_pf_migration_guc_restore() - Restore VF GuC migration data.
* @gt: the &xe_gt
* @vfid: the VF identifier
* @data: the &xe_sriov_packet containing migration data
*
* This function is for PF only.
*
* Return: 0 on success or a negative error code on failure.
*/
int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid)
int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
struct xe_sriov_packet *data)
{
int ret;
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
xe_gt_assert(gt, vfid != PFID);
xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
@@ -287,98 +266,9 @@ int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vf
if (!pf_migration_supported(gt))
return -ENOPKG;
mutex_lock(pf_migration_mutex(gt));
ret = pf_restore_vf_guc_state(gt, vfid);
mutex_unlock(pf_migration_mutex(gt));
return ret;
return pf_restore_vf_guc_state(gt, vfid, data);
}
#ifdef CONFIG_DEBUG_FS
/**
* xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state.
* @gt: the &xe_gt
* @vfid: the VF identifier
* @buf: the user space buffer to read to
* @count: the maximum number of bytes to read
* @pos: the current position in the buffer
*
* This function is for PF only.
*
* This function reads up to @count bytes from the saved VF GuC state buffer
* at offset @pos into the user space address starting at @buf.
*
* Return: the number of bytes read or a negative error code on failure.
*/
ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
char __user *buf, size_t count, loff_t *pos)
{
struct xe_gt_sriov_state_snapshot *snapshot;
ssize_t ret;
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
xe_gt_assert(gt, vfid != PFID);
xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
if (!pf_migration_supported(gt))
return -ENOPKG;
mutex_lock(pf_migration_mutex(gt));
snapshot = pf_pick_vf_snapshot(gt, vfid);
if (snapshot->guc.size)
ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff,
snapshot->guc.size);
else
ret = -ENODATA;
mutex_unlock(pf_migration_mutex(gt));
return ret;
}
/**
* xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state.
* @gt: the &xe_gt
* @vfid: the VF identifier
* @buf: the user space buffer with GuC VF state
* @size: the size of GuC VF state (in bytes)
*
* This function is for PF only.
*
* This function reads @size bytes of the VF GuC state stored at user space
* address @buf and writes it into a internal VF state buffer.
*
* Return: the number of bytes used or a negative error code on failure.
*/
ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
const char __user *buf, size_t size)
{
struct xe_gt_sriov_state_snapshot *snapshot;
loff_t pos = 0;
ssize_t ret;
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
xe_gt_assert(gt, vfid != PFID);
xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
if (!pf_migration_supported(gt))
return -ENOPKG;
mutex_lock(pf_migration_mutex(gt));
snapshot = pf_pick_vf_snapshot(gt, vfid);
ret = pf_alloc_guc_state(gt, snapshot, size);
if (!ret) {
ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size);
if (ret < 0)
pf_free_guc_state(gt, snapshot);
else
pf_dump_guc_state(gt, snapshot);
}
mutex_unlock(pf_migration_mutex(gt));
return ret;
}
#endif /* CONFIG_DEBUG_FS */
/**
* xe_gt_sriov_pf_migration_size() - Total size of migration data from all components within a GT.
* @gt: the &xe_gt
@@ -619,10 +509,6 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
if (!pf_migration_supported(gt))
return 0;
err = drmm_mutex_init(&xe->drm, &gt->sriov.pf.migration.snapshot_lock);
if (err)
return err;
totalvfs = xe_sriov_pf_get_totalvfs(xe);
for (n = 1; n <= totalvfs; n++) {
struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, n);
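
The restore counterpart takes an already-filled &xe_sriov_packet. A hedged
sketch of applying a single GuC packet, assuming the caller keeps ownership
of the packet and frees it afterwards (only the two functions called here are
part of this series):

static int example_restore_one_guc_packet(struct xe_gt *gt, unsigned int vfid,
                                          struct xe_sriov_packet *data)
{
        int err;

        /* Push the payload back to the GuC through the PF restore action. */
        err = xe_gt_sriov_pf_migration_guc_restore(gt, vfid, data);

        /* Ownership is assumed to stay with the caller, so free it here. */
        xe_sriov_packet_free(data);

        return err;
}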

drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h

@@ -15,8 +15,10 @@ struct xe_sriov_packet;
#define XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE SZ_8M
int xe_gt_sriov_pf_migration_init(struct xe_gt *gt);
int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid);
ssize_t xe_gt_sriov_pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
struct xe_sriov_packet *data);
ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid);
@@ -34,11 +36,4 @@ int xe_gt_sriov_pf_migration_restore_produce(struct xe_gt *gt, unsigned int vfid
struct xe_sriov_packet *
xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid);
#ifdef CONFIG_DEBUG_FS
ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
char __user *buf, size_t count, loff_t *pos);
ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
const char __user *buf, size_t count);
#endif
#endif
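
Since xe_gt_sriov_pf_migration_guc_size() now reports bytes, a caller sizing
the full migration stream could sum it across GTs. An illustrative sketch in
which the aggregation helper itself is an assumption (for_each_gt() and the
size query are existing interfaces):

static ssize_t example_total_guc_mig_size(struct xe_device *xe, unsigned int vfid)
{
        struct xe_gt *gt;
        unsigned int id;
        ssize_t total = 0;

        for_each_gt(gt, xe, id) {
                ssize_t size = xe_gt_sriov_pf_migration_guc_size(gt, vfid);

                if (size < 0)
                        return size;
                total += size;
        }

        return total;
}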

drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h

@@ -6,24 +6,7 @@
#ifndef _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
#define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
#include <linux/mutex.h>
#include <linux/ptr_ring.h>
#include <linux/types.h>
/**
* struct xe_gt_sriov_state_snapshot - GT-level per-VF state snapshot data.
*
* Used by the PF driver to maintain per-VF migration data.
*/
struct xe_gt_sriov_state_snapshot {
/** @guc: GuC VF state snapshot */
struct {
/** @guc.buff: buffer with the VF state */
u32 *buff;
/** @guc.size: size of the buffer (must be dwords aligned) */
u32 size;
} guc;
};
/**
* struct xe_gt_sriov_migration_data - GT-level per-VF migration data.
@@ -35,14 +18,4 @@ struct xe_gt_sriov_migration_data {
struct ptr_ring ring;
};
/**
* struct xe_gt_sriov_pf_migration - GT-level data.
*
* Used by the PF driver to maintain non-VF specific per-GT data.
*/
struct xe_gt_sriov_pf_migration {
/** @snapshot_lock: protects all VFs snapshots */
struct mutex snapshot_lock;
};
#endif
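
With struct xe_gt_sriov_state_snapshot and its snapshot_lock gone, per-VF GuC
data travels as packets on the ptr_ring embedded in struct
xe_gt_sriov_migration_data. A sketch of what queueing a packet looks like with
that layout; the helper is hypothetical, while ptr_ring_produce() is the stock
kernel API and does its own locking:

static int example_queue_guc_packet(struct xe_gt *gt, unsigned int vfid,
                                    struct xe_sriov_packet *data)
{
        struct xe_gt_sriov_migration_data *migration =
                &gt->sriov.pf.vfs[vfid].migration;

        /* Returns 0 on success or -ENOSPC when the ring is full. */
        return ptr_ring_produce(&migration->ring, data);
}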

drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h

@@ -31,9 +31,6 @@ struct xe_gt_sriov_metadata {
/** @version: negotiated VF/PF ABI version */
struct xe_gt_sriov_pf_service_version version;
/** @snapshot: snapshot of the VF state data */
struct xe_gt_sriov_state_snapshot snapshot;
/** @migration: per-VF migration data. */
struct xe_gt_sriov_migration_data migration;
};
@@ -61,7 +58,6 @@ struct xe_gt_sriov_pf {
struct xe_gt_sriov_pf_service service;
struct xe_gt_sriov_pf_control control;
struct xe_gt_sriov_pf_policy policy;
struct xe_gt_sriov_pf_migration migration;
struct xe_gt_sriov_spare_config spare;
struct xe_gt_sriov_metadata *vfs;
};