drm/xe/vf: Create contexts for CCS read write
Create two LRCs to handle CCS metadata read/write from the CCS pool in the
VM. The read context holds the GPU instructions to be executed at save time
and the write context holds the GPU instructions to be executed at restore
time. Allocate a batch buffer pool using the suballocator for both the read
and write contexts. The migration framework is reused to create the LRCAs
for read and write.

Signed-off-by: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250722120506.6483-2-satyanarayana.k.v.p@intel.com
commit f3009272ff
parent 9a220e0659
committed by Matthew Brost
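The message above describes the shape of the change. As orientation, here is a condensed, standalone model of that layout (stand-in types and user-space allocations purely for illustration, not the driver code in the diff below): one context per direction, each carrying its own migration LRC handle and batch-buffer pool.

/* Condensed model of the per-tile CCS context pair this patch introduces.
 * Names echo the patch; the pointer members are stand-ins so this sketch
 * builds on its own. It is NOT the driver code.
 */
#include <stdio.h>
#include <stdlib.h>

enum vf_ccs_rw_ctx {
        VF_CCS_READ_CTX,        /* GPU instructions executed at save time */
        VF_CCS_WRITE_CTX,       /* GPU instructions executed at restore time */
        VF_CCS_CTX_COUNT
};

struct vf_ccs_ctx {
        enum vf_ccs_rw_ctx id;
        void *migrate;          /* stand-in for the migration-framework LRCA */
        void *ccs_bb_pool;      /* stand-in for the suballocated BB pool */
};

int main(void)
{
        struct vf_ccs_ctx ccs[VF_CCS_CTX_COUNT];
        int id;

        for (id = 0; id < VF_CCS_CTX_COUNT; id++) {
                ccs[id].id = (enum vf_ccs_rw_ctx)id;
                ccs[id].migrate = malloc(1);     /* xe_migrate_init() in the patch */
                ccs[id].ccs_bb_pool = malloc(1); /* alloc_bb_pool() in the patch */
                printf("%s context ready\n",
                       id == VF_CCS_READ_CTX ? "read/save" : "write/restore");
        }
        for (id = 0; id < VF_CCS_CTX_COUNT; id++) {
                free(ccs[id].migrate);
                free(ccs[id].ccs_bb_pool);
        }
        return 0;
}

In the patch itself this pair lives in the per-tile array tile->sriov.vf.ccs[], added further down.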
@@ -149,6 +149,7 @@ xe-y += \
         xe_memirq.o \
         xe_sriov.o \
         xe_sriov_vf.o \
+        xe_sriov_vf_ccs.o \
         xe_tile_sriov_vf.o
 
 xe-$(CONFIG_PCI_IOV) += \
@@ -940,6 +940,10 @@ int xe_device_probe(struct xe_device *xe)
 
         xe_vsec_init(xe);
 
+        err = xe_sriov_late_init(xe);
+        if (err)
+                goto err_unregister_display;
+
         return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
 
 err_unregister_display:
@@ -23,6 +23,7 @@
 #include "xe_sriov_pf_types.h"
 #include "xe_sriov_types.h"
 #include "xe_sriov_vf_types.h"
+#include "xe_sriov_vf_ccs_types.h"
 #include "xe_step_types.h"
 #include "xe_survivability_mode_types.h"
 
@@ -182,6 +183,9 @@ struct xe_tile {
                 struct {
                         /** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
                         struct xe_ggtt_node *ggtt_balloon[2];
+
+                        /** @sriov.vf.ccs: CCS read and write contexts for VF. */
+                        struct xe_tile_vf_ccs ccs[XE_SRIOV_VF_CCS_CTX_COUNT];
                 } vf;
         } sriov;
 
@@ -134,6 +134,33 @@ static int sa_info(struct xe_gt *gt, struct drm_printer *p)
         return 0;
 }
 
+static int sa_info_vf_ccs(struct xe_gt *gt, struct drm_printer *p)
+{
+        struct xe_tile *tile = gt_to_tile(gt);
+        struct xe_sa_manager *bb_pool;
+        enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+
+        if (!IS_VF_CCS_READY(gt_to_xe(gt)))
+                return 0;
+
+        xe_pm_runtime_get(gt_to_xe(gt));
+
+        for_each_ccs_rw_ctx(ctx_id) {
+                bb_pool = tile->sriov.vf.ccs[ctx_id].mem.ccs_bb_pool;
+                if (!bb_pool)
+                        break;
+
+                drm_printf(p, "ccs %s bb suballoc info\n", ctx_id ? "write" : "read");
+                drm_printf(p, "-------------------------\n");
+                drm_suballoc_dump_debug_info(&bb_pool->base, p, bb_pool->gpu_addr);
+                drm_puts(p, "\n");
+        }
+
+        xe_pm_runtime_put(gt_to_xe(gt));
+
+        return 0;
+}
+
 static int topology(struct xe_gt *gt, struct drm_printer *p)
 {
         xe_pm_runtime_get(gt_to_xe(gt));
@@ -303,6 +330,13 @@ static const struct drm_info_list vf_safe_debugfs_list[] = {
         {"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig},
 };
 
+/*
+ * only for GT debugfs files which are valid on VF. Not valid on PF.
+ */
+static const struct drm_info_list vf_only_debugfs_list[] = {
+        {"sa_info_vf_ccs", .show = xe_gt_debugfs_simple_show, .data = sa_info_vf_ccs},
+};
+
 /* everything else should be added here */
 static const struct drm_info_list pf_only_debugfs_list[] = {
         {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
@@ -424,6 +458,11 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
                 drm_debugfs_create_files(pf_only_debugfs_list,
                                          ARRAY_SIZE(pf_only_debugfs_list),
                                          root, minor);
+        else
+                drm_debugfs_create_files(vf_only_debugfs_list,
+                                         ARRAY_SIZE(vf_only_debugfs_list),
+                                         root, minor);
 
         xe_uc_debugfs_register(&gt->uc, root);
 
@@ -15,6 +15,7 @@
 #include "xe_sriov.h"
 #include "xe_sriov_pf.h"
 #include "xe_sriov_vf.h"
+#include "xe_sriov_vf_ccs.h"
 
 /**
  * xe_sriov_mode_to_string - Convert enum value to string.
@@ -157,3 +158,21 @@ const char *xe_sriov_function_name(unsigned int n, char *buf, size_t size)
                 strscpy(buf, "PF", size);
         return buf;
 }
+
+/**
+ * xe_sriov_late_init() - SR-IOV late initialization functions.
+ * @xe: the &xe_device to initialize
+ *
+ * On VF this function will initialize code for CCS migration.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_late_init(struct xe_device *xe)
+{
+        int err = 0;
+
+        if (IS_VF_CCS_INIT_NEEDED(xe))
+                err = xe_sriov_vf_ccs_init(xe);
+
+        return err;
+}
@@ -18,6 +18,7 @@ const char *xe_sriov_function_name(unsigned int n, char *buf, size_t len);
 void xe_sriov_probe_early(struct xe_device *xe);
 void xe_sriov_print_info(struct xe_device *xe, struct drm_printer *p);
 int xe_sriov_init(struct xe_device *xe);
+int xe_sriov_late_init(struct xe_device *xe);
 
 static inline enum xe_sriov_mode xe_device_sriov_mode(const struct xe_device *xe)
 {
drivers/gpu/drm/xe/xe_sriov_vf_ccs.c (new file, 208 lines)
@@ -0,0 +1,208 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include "instructions/xe_mi_commands.h"
#include "instructions/xe_gpu_commands.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_migrate.h"
#include "xe_sa.h"
#include "xe_sriov_printk.h"
#include "xe_sriov_vf_ccs.h"
#include "xe_sriov_vf_ccs_types.h"

/**
 * DOC: VF save/restore of compression Meta Data
 *
 * VF KMD registers two special contexts/LRCAs.
 *
 * Save Context/LRCA: contains the necessary cmds + page table to trigger a
 * Meta data / compression control surface (aka CCS) save to regular System
 * memory in the VM.
 *
 * Restore Context/LRCA: contains the necessary cmds + page table to trigger a
 * Meta data / compression control surface (aka CCS) restore from regular
 * System memory in the VM back to the corresponding CCS pool.
 *
 * The sequence below lists the steps needed for VF save/restore of
 * compression Meta Data. The actors are the CCS Save LRCA, the CCS Restore
 * LRCA, the VF KMD, the GuC and the BCS:
 *
 *  1. VF KMD creates the Save LRCA.
 *  2. VF KMD registers the Save LRCA with the GuC.
 *  3. VF KMD creates the Restore LRCA.
 *  4. VF KMD registers the Restore LRCA with the GuC.
 *  5. VF KMD allocates main memory and CCS memory, then updates the PPGTT
 *     for the main memory and CCS pages plus the BB cmds to save and
 *     restore, in both the Save and Restore LRCAs.
 *
 *  ---------------------------- VF Paused --------------------------------
 *
 *  6. GuC schedules the CCS Save LRCA on the BCS.
 *  7. BCS reports CCS save completed to the GuC.
 *
 *  ---------------------------- VM Migrated ------------------------------
 *  ---------------------------- VF Resumed -------------------------------
 *
 *  8. VF KMD fixes up the GGTT.
 *  9. VF KMD notifies the GuC with VF_RESFIX_DONE.
 * 10. GuC schedules the CCS Restore LRCA on the BCS.
 * 11. BCS reports CCS restore completed to the GuC.
 * 12. GuC sends the VF_RESFIX_DONE complete notification to the VF KMD.
 *
 *  ------------------------- Continue VM restore --------------------------
 */

static u64 get_ccs_bb_pool_size(struct xe_device *xe)
{
        u64 sys_mem_size, ccs_mem_size, ptes, bb_pool_size;
        struct sysinfo si;

        si_meminfo(&si);
        sys_mem_size = si.totalram * si.mem_unit;
        ccs_mem_size = div64_u64(sys_mem_size, NUM_BYTES_PER_CCS_BYTE(xe));
        ptes = DIV_ROUND_UP_ULL(sys_mem_size + ccs_mem_size, XE_PAGE_SIZE);

        /**
         * We need below BB size to hold PTE mappings and some DWs for copy
         * command. In reality, we need space for many copy commands. So, let
         * us allocate double the calculated size which is enough to holds GPU
         * instructions for the whole region.
         */
        bb_pool_size = ptes * sizeof(u32);

        return round_up(bb_pool_size * 2, SZ_1M);
}

static int alloc_bb_pool(struct xe_tile *tile, struct xe_tile_vf_ccs *ctx)
{
        struct xe_device *xe = tile_to_xe(tile);
        struct xe_sa_manager *sa_manager;
        u64 bb_pool_size;
        int offset, err;

        bb_pool_size = get_ccs_bb_pool_size(xe);
        xe_sriov_info(xe, "Allocating %s CCS BB pool size = %lldMB\n",
                      ctx->ctx_id ? "Restore" : "Save", bb_pool_size / SZ_1M);

        sa_manager = xe_sa_bo_manager_init(tile, bb_pool_size, SZ_16);

        if (IS_ERR(sa_manager)) {
                xe_sriov_err(xe, "Suballocator init failed with error: %pe\n",
                             sa_manager);
                err = PTR_ERR(sa_manager);
                return err;
        }

        offset = 0;
        xe_map_memset(xe, &sa_manager->bo->vmap, offset, MI_NOOP,
                      bb_pool_size);

        offset = bb_pool_size - sizeof(u32);
        xe_map_wr(xe, &sa_manager->bo->vmap, offset, u32, MI_BATCH_BUFFER_END);

        ctx->mem.ccs_bb_pool = sa_manager;

        return 0;
}

/**
 * xe_sriov_vf_ccs_init - Setup LRCA for save & restore.
 * @xe: the &xe_device to start recovery on
 *
 * This function shall be called only by VF. It initializes
 * LRCA and suballocator needed for CCS save & restore.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_sriov_vf_ccs_init(struct xe_device *xe)
{
        struct xe_tile *tile = xe_device_get_root_tile(xe);
        enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
        struct xe_migrate *migrate;
        struct xe_tile_vf_ccs *ctx;
        int err;

        xe_assert(xe, IS_SRIOV_VF(xe));
        xe_assert(xe, !IS_DGFX(xe));
        xe_assert(xe, xe_device_has_flat_ccs(xe));

        for_each_ccs_rw_ctx(ctx_id) {
                ctx = &tile->sriov.vf.ccs[ctx_id];
                ctx->ctx_id = ctx_id;

                migrate = xe_migrate_init(tile);
                if (IS_ERR(migrate)) {
                        err = PTR_ERR(migrate);
                        goto err_ret;
                }
                ctx->migrate = migrate;

                err = alloc_bb_pool(tile, ctx);
                if (err)
                        goto err_ret;
        }

        xe->sriov.vf.ccs.initialized = 1;

        return 0;

err_ret:
        return err;
}
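To make the sizing reasoning in get_ccs_bb_pool_size() concrete, here is a standalone calculation under assumed values: a 1:256 CCS-to-main-memory ratio and 4 KiB pages. NUM_BYTES_PER_CCS_BYTE() and XE_PAGE_SIZE are platform-dependent, so these numbers are illustrative only; the shape of the math mirrors the function above.

/* Standalone sketch of the BB pool sizing above.
 * Assumes a 1:256 CCS ratio and 4 KiB pages purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t SZ_1M = 1024 * 1024;
        uint64_t sys_mem_size = 16ULL * 1024 * SZ_1M; /* e.g. a 16 GiB VM */
        uint64_t bytes_per_ccs_byte = 256;            /* assumed ratio */
        uint64_t page_size = 4096;                    /* assumed page size */

        uint64_t ccs_mem_size = sys_mem_size / bytes_per_ccs_byte;
        uint64_t ptes = (sys_mem_size + ccs_mem_size + page_size - 1) / page_size;

        /* One u32 per PTE, doubled for copy commands, rounded up to 1 MiB */
        uint64_t bb_pool_size = ptes * sizeof(uint32_t) * 2;
        bb_pool_size = (bb_pool_size + SZ_1M - 1) / SZ_1M * SZ_1M;

        printf("BB pool size: %llu MiB\n",
               (unsigned long long)(bb_pool_size / SZ_1M));
        return 0;
}

Under these assumptions, a VF with 16 GiB of system memory ends up with a pool of roughly 33 MiB per context.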
drivers/gpu/drm/xe/xe_sriov_vf_ccs.h (new file, 13 lines)
@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2025 Intel Corporation
 */

#ifndef _XE_SRIOV_VF_CCS_H_
#define _XE_SRIOV_VF_CCS_H_

struct xe_device;

int xe_sriov_vf_ccs_init(struct xe_device *xe);

#endif
drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h (new file, 45 lines)
@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2025 Intel Corporation
 */

#ifndef _XE_SRIOV_VF_CCS_TYPES_H_
#define _XE_SRIOV_VF_CCS_TYPES_H_

#define for_each_ccs_rw_ctx(id__) \
        for ((id__) = 0; (id__) < XE_SRIOV_VF_CCS_CTX_COUNT; (id__)++)

#define IS_VF_CCS_READY(xe) ({ \
                struct xe_device *___xe = (xe); \
                xe_assert(___xe, IS_SRIOV_VF(___xe)); \
                ___xe->sriov.vf.ccs.initialized; \
                })

#define IS_VF_CCS_INIT_NEEDED(xe) ({\
                struct xe_device *___xe = (xe); \
                IS_SRIOV_VF(___xe) && !IS_DGFX(___xe) && \
                xe_device_has_flat_ccs(___xe) && GRAPHICS_VER(___xe) >= 20; \
                })

enum xe_sriov_vf_ccs_rw_ctxs {
        XE_SRIOV_VF_CCS_READ_CTX,
        XE_SRIOV_VF_CCS_WRITE_CTX,
        XE_SRIOV_VF_CCS_CTX_COUNT
};

struct xe_migrate;
struct xe_sa_manager;

struct xe_tile_vf_ccs {
        /** @id: Id to which context it belongs to */
        enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
        /** @migrate: Migration helper for save/restore of CCS data */
        struct xe_migrate *migrate;

        struct {
                /** @ccs_rw_bb_pool: Pool from which batch buffers are allocated. */
                struct xe_sa_manager *ccs_bb_pool;
        } mem;
};

#endif
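IS_VF_CCS_READY() and IS_VF_CCS_INIT_NEEDED() above use GNU statement expressions, where a ({ ... }) block evaluates to its last expression, so the macro can declare a local variable and still behave as a value. A minimal standalone illustration of that pattern follows; fake_xe and FAKE_VF_CCS_READY are made-up stand-ins for this sketch, not the driver's types or macros.

/* Illustration of the ({ ... }) statement-expression pattern (a GNU C
 * extension, available in GCC and Clang as used by the kernel).
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_xe {
        bool vf;
        bool ccs_initialized;
};

#define FAKE_VF_CCS_READY(xe) ({ \
                struct fake_xe *___xe = (xe); \
                ___xe->vf && ___xe->ccs_initialized; \
        })

int main(void)
{
        struct fake_xe xe = { .vf = true, .ccs_initialized = true };

        printf("ready: %d\n", FAKE_VF_CCS_READY(&xe)); /* prints 1 */
        return 0;
}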
@@ -36,6 +36,12 @@ struct xe_device_vf {
                 /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
                 unsigned long gt_flags;
         } migration;
+
+        /** @ccs: VF CCS state data */
+        struct {
+                /** @ccs.initialized: Initilalization of VF CCS is completed or not */
+                bool initialized;
+        } ccs;
 };
 
 #endif