drm/xe/guc_pc: Use poll_timeout_us() for waiting
Convert wait_for_pc_state() and wait_for_act_freq_limit() to
poll_timeout_us(). This brings two changes in behavior:

  1) Drop the exponential wait increase.
  2) Fix a potentially much longer sleep: usleep_range() may sleep
     anywhere between `wait` and `wait << 1`, so assuming `slept += wait`
     is not accurate. Paired with the exponential wait increase, the
     loop could end up waiting much longer than intended.

Reviewed-by: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
Link: https://lore.kernel.org/r/20250922-xe-iopoll-v4-2-06438311a63f@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
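The "much longer sleep" is easy to quantify. Below is a small userspace
simulation (illustrative only; the 50 ms budget and the main() harness
are assumptions for the demo, not driver code) that replays the removed
loop's bookkeeping while charging the upper bound of each usleep_range()
window, i.e. the worst case the old accounting ignored:

#include <stdio.h>

int main(void)
{
	int timeout_us = 50000;	/* assumed budget: 50 ms */
	int slept = 0, wait = 10;
	long actual_us = 0;	/* worst-case time really spent sleeping */

	while (slept < timeout_us) {
		/* usleep_range(wait, wait << 1) may sleep the upper bound */
		actual_us += wait << 1;
		slept += wait;	/* the old loop accounted only the lower bound */
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	printf("accounted %d us, worst-case actual %ld us\n",
	       slept, actual_us);
	return 0;
}

Every iteration can sleep up to twice what `slept` records, so a 50 ms
budget can mean roughly 100 ms of real waiting before the loop gives up.
That is the drift the poll_timeout_us() conversion removes.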
@@ -7,6 +7,7 @@
 #include <linux/cleanup.h>
 #include <linux/delay.h>
+#include <linux/iopoll.h>
 #include <linux/jiffies.h>
 #include <linux/ktime.h>
 #include <linux/wait_bit.h>
@@ -130,26 +131,16 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
 		   FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
 
 static int wait_for_pc_state(struct xe_guc_pc *pc,
-			     enum slpc_global_state state,
+			     enum slpc_global_state target_state,
 			     int timeout_ms)
 {
-	int timeout_us = 1000 * timeout_ms;
-	int slept, wait = 10;
+	enum slpc_global_state state;
 
 	xe_device_assert_mem_access(pc_to_xe(pc));
 
-	for (slept = 0; slept < timeout_us;) {
-		if (slpc_shared_data_read(pc, header.global_state) == state)
-			return 0;
-
-		usleep_range(wait, wait << 1);
-		slept += wait;
-		wait <<= 1;
-		if (slept + wait > timeout_us)
-			wait = timeout_us - slept;
-	}
-
-	return -ETIMEDOUT;
+	return poll_timeout_us(state = slpc_shared_data_read(pc, header.global_state),
+			       state == target_state,
+			       20, timeout_ms * USEC_PER_MSEC, false);
 }
 
 static int wait_for_flush_complete(struct xe_guc_pc *pc)
@@ -164,24 +155,15 @@ static int wait_for_flush_complete(struct xe_guc_pc *pc)
 	return 0;
 }
 
-static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq)
+static int wait_for_act_freq_max_limit(struct xe_guc_pc *pc, u32 max_limit)
 {
-	int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC;
-	int slept, wait = 10;
+	u32 freq;
 
-	for (slept = 0; slept < timeout_us;) {
-		if (xe_guc_pc_get_act_freq(pc) <= freq)
-			return 0;
-
-		usleep_range(wait, wait << 1);
-		slept += wait;
-		wait <<= 1;
-		if (slept + wait > timeout_us)
-			wait = timeout_us - slept;
-	}
-
-	return -ETIMEDOUT;
+	return poll_timeout_us(freq = xe_guc_pc_get_act_freq(pc),
+			       freq <= max_limit,
+			       20, SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC, false);
 }
 
 static int pc_action_reset(struct xe_guc_pc *pc)
 {
 	struct xe_guc_ct *ct = pc_to_ct(pc);
@@ -983,7 +965,7 @@ void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
 	 * Wait for actual freq to go below the flush cap: even if the previous
 	 * max was below cap, the current one might still be above it
	 */
-	ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
+	ret = wait_for_act_freq_max_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
 	if (ret)
 		xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
 			       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
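For reference, the call sites above use the helper as
poll_timeout_us(op, cond, sleep_us, timeout_us, sleep_before_op),
polling every 20 us with no sleep before the first evaluation. The
following is a rough sketch of that contract specialized to the
frequency wait, not the actual macro body in <linux/iopoll.h> (which is
generic, but tracks the deadline with ktime in much the same spirit):

/* Illustrative sketch only -- not the real poll_timeout_us(). */
static int poll_act_freq_below_sketch(struct xe_guc_pc *pc, u32 max_limit,
				      unsigned long sleep_us, u64 timeout_us)
{
	ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);
	u32 freq;

	for (;;) {
		freq = xe_guc_pc_get_act_freq(pc);	/* op */
		if (freq <= max_limit)			/* cond */
			return 0;
		if (ktime_after(ktime_get(), deadline)) {
			/* final re-check so a late wakeup cannot miss success */
			freq = xe_guc_pc_get_act_freq(pc);
			return freq <= max_limit ? 0 : -ETIMEDOUT;
		}
		usleep_range(sleep_us, sleep_us * 2);
	}
}

Measuring elapsed time with ktime_get() instead of summing the requested
sleep intervals is what makes the timeout honest: however long
usleep_range() actually sleeps, the deadline does not move.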