drm/xe/vf: Use GUC_HXG_TYPE_EVENT for GuC context register
The only case where the GuC submission backend cannot reason 100%
correctly about context state is when a GuC context is registered during
VF post-migration recovery. In this scenario, it is possible that the GuC
context register H2G is processed but the immediately following
schedule-enable H2G gets lost. The schedule-enable G2H "done" response is
how the GuC state machine determines whether context registration has
completed.

A double register is harmless when using GUC_HXG_TYPE_EVENT, as the GuC
simply drops the duplicate H2G. To keep things simple, use
GUC_HXG_TYPE_EVENT for all context registrations on VFs.

v5:
 - Check for xe_sriov_vf_migration_supported (Tomasz)
v7:
 - Add comment about subsequent protocol failures (Tomasz)
 - Modify commit message (Michal)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
Link: https://lore.kernel.org/r/20251008214532.3442967-20-matthew.brost@intel.com
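For readers unfamiliar with the HxG header layout, the sketch below shows how the first H2G dword encodes the message type and action, mirroring the FIELD_PREP() pattern that this patch wraps in MAKE_ACTION(). It is an illustrative sketch only, not code from the patch: the helper name make_hxg_header() is made up, and it assumes FIELD_PREP() from <linux/bitfield.h> plus the GUC_HXG_* definitions from the xe GuC ABI headers (header path assumed).

/* Illustrative sketch only - not part of the patch. */
#include <linux/bitfield.h>		/* FIELD_PREP() */
#include "abi/guc_messages_abi.h"	/* GUC_HXG_* (assumed header path) */

static u32 make_hxg_header(u32 type, u32 action)
{
	/*
	 * The type field selects how the GuC treats the message:
	 *   GUC_HXG_TYPE_REQUEST      - GuC sends a G2H "done" response.
	 *   GUC_HXG_TYPE_FAST_REQUEST - no response; failures surface as
	 *                               asynchronous notifications, which the
	 *                               host correlates via fast_req_track().
	 *   GUC_HXG_TYPE_EVENT        - fire-and-forget; a duplicate, such as
	 *                               a context register re-sent during VF
	 *                               post-migration recovery, is simply
	 *                               dropped by the GuC.
	 */
	return FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |
	       FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
			  GUC_HXG_EVENT_MSG_0_DATA0, action);
}

With that encoding in mind, the change below boils down to picking GUC_HXG_TYPE_EVENT instead of GUC_HXG_TYPE_FAST_REQUEST for the context register actions on migration-capable VFs.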
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -32,6 +32,7 @@
 #include "xe_guc_tlb_inval.h"
 #include "xe_map.h"
 #include "xe_pm.h"
+#include "xe_sriov_vf.h"
 #include "xe_trace_guc.h"
 
 static void receive_g2h(struct xe_guc_ct *ct);
@@ -736,6 +737,28 @@ static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
 	return seqno;
 }
 
+#define MAKE_ACTION(type, __action) \
+	({ \
+		FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) | \
+		FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | \
+			   GUC_HXG_EVENT_MSG_0_DATA0, __action); \
+	})
+
+static bool vf_action_can_safely_fail(struct xe_device *xe, u32 action)
+{
+	/*
+	 * When resuming a VF, we can't reliably track whether context
+	 * registration has completed in the GuC state machine. It is harmless
+	 * to resend the request, as it will fail silently if GUC_HXG_TYPE_EVENT
+	 * is used. Additionally, if there is an H2G protocol issue on a VF,
+	 * subsequent H2G messages sent as GUC_HXG_TYPE_FAST_REQUEST will likely
+	 * fail.
+	 */
+	return IS_SRIOV_VF(xe) && xe_sriov_vf_migration_supported(xe) &&
+		(action == XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC ||
+		 action == XE_GUC_ACTION_REGISTER_CONTEXT);
+}
+
 #define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
 
 static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
@@ -807,18 +830,14 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
 		FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
 		FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
 	if (want_response) {
-		cmd[1] =
-			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
-			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
-				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_REQUEST, action[0]);
+	} else if (vf_action_can_safely_fail(xe, action[0])) {
+		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_EVENT, action[0]);
 	} else {
 		fast_req_track(ct, ct_fence_value,
 			       FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
 
-		cmd[1] =
-			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
-			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
-				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+		cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_FAST_REQUEST, action[0]);
 	}
 
 	/* H2G header in cmd[1] replaces action[0] so: */