drm/xe: Implement xe_pagefault_init
Create pagefault queues and initialize them.

v2:
 - Fix kernel doc + add a comment for the number of PF queues (Francois)
v4:
 - Move init after GT init (CI, Francois)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Francois Dugast <francois.dugast@intel.com>
Tested-by: Francois Dugast <francois.dugast@intel.com>
Link: https://patch.msgid.link/20251031165416.2871503-3-matthew.brost@intel.com
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -52,6 +52,7 @@
 #include "xe_nvm.h"
 #include "xe_oa.h"
 #include "xe_observation.h"
+#include "xe_pagefault.h"
 #include "xe_pat.h"
 #include "xe_pcode.h"
 #include "xe_pm.h"
@@ -896,6 +897,10 @@ int xe_device_probe(struct xe_device *xe)
 			return err;
 	}
 
+	err = xe_pagefault_init(xe);
+	if (err)
+		return err;
+
 	if (xe->tiles->media_gt &&
 	    XE_GT_WA(xe->tiles->media_gt, 15015404425_disable))
 		XE_DEVICE_WA_DISABLE(xe, 15015404425);
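Note: the unwind on failure here is simpler than it looks because xe_pagefault_init() ties its own teardown to the device via devm (see xe_pagefault.c below). A minimal sketch of that devm pattern, with hypothetical my_init()/my_fini() names rather than code from this patch:

#include <linux/device.h>
#include <linux/slab.h>

struct my_state {
	void *buf;
};

/* Runs automatically when the device is unbound (or if registration fails) */
static void my_fini(void *arg)
{
	struct my_state *st = arg;

	kfree(st->buf);
}

static int my_init(struct device *dev, struct my_state *st)
{
	st->buf = kzalloc(4096, GFP_KERNEL);
	if (!st->buf)
		return -ENOMEM;

	/* On registration failure this calls my_fini(st) and returns the error */
	return devm_add_action_or_reset(dev, my_fini, st);
}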
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -18,6 +18,7 @@
 #include "xe_lmtt_types.h"
 #include "xe_memirq_types.h"
 #include "xe_oa_types.h"
+#include "xe_pagefault_types.h"
 #include "xe_platform_types.h"
 #include "xe_pmu_types.h"
 #include "xe_pt_types.h"
@@ -413,6 +414,16 @@ struct xe_device {
 		u32 next_asid;
 		/** @usm.lock: protects UM state */
 		struct rw_semaphore lock;
+		/** @usm.pf_wq: page fault work queue, unbound, high priority */
+		struct workqueue_struct *pf_wq;
+		/*
+		 * We pick 4 here because, in the current implementation, it
+		 * yields the best bandwidth utilization of the kernel paging
+		 * engine.
+		 */
+#define XE_PAGEFAULT_QUEUE_COUNT	4
+		/** @usm.pf_queue: Page fault queues */
+		struct xe_pagefault_queue pf_queue[XE_PAGEFAULT_QUEUE_COUNT];
 	} usm;
 
 	/** @pinned: pinned BO state */
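Note: the struct pairs XE_PAGEFAULT_QUEUE_COUNT fault queues with one unbound, high priority workqueue, and xe_pagefault_init() below passes the same count as max_active, so every queue's worker can run in parallel. A minimal sketch of that pairing, with hypothetical names (the producer side that would call queue_work() lands in later patches):

#include <linux/workqueue.h>

#define NUM_QUEUES	4	/* stands in for XE_PAGEFAULT_QUEUE_COUNT */

static struct workqueue_struct *wq;
static struct work_struct work[NUM_QUEUES];

static void worker_fn(struct work_struct *w)
{
	/* drain one queue's pending faults */
}

static int example_init(void)
{
	int i;

	/* max_active == NUM_QUEUES lets every queue's worker run concurrently */
	wq = alloc_workqueue("example_wq", WQ_UNBOUND | WQ_HIGHPRI, NUM_QUEUES);
	if (!wq)
		return -ENOMEM;

	for (i = 0; i < NUM_QUEUES; i++)
		INIT_WORK(&work[i], worker_fn);

	/* a producer would later kick a worker with queue_work(wq, &work[i]) */
	return 0;
}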
--- a/drivers/gpu/drm/xe/xe_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_pagefault.c
@@ -3,6 +3,10 @@
  * Copyright © 2025 Intel Corporation
  */
 
+#include <drm/drm_managed.h>
+
+#include "xe_device.h"
+#include "xe_gt_types.h"
 #include "xe_pagefault.h"
 #include "xe_pagefault_types.h"
 
@@ -21,6 +25,76 @@
  * xe_pagefault.c implements the consumer layer.
  */
 
+static int xe_pagefault_entry_size(void)
+{
+	/*
+	 * Power of two alignment is not a hardware requirement, rather a
+	 * software restriction which makes the math for page fault queue
+	 * management simpler.
+	 */
+	return roundup_pow_of_two(sizeof(struct xe_pagefault));
+}
+
+static void xe_pagefault_queue_work(struct work_struct *w)
+{
+	/* TODO: Implement */
+}
+
+static int xe_pagefault_queue_init(struct xe_device *xe,
+				   struct xe_pagefault_queue *pf_queue)
+{
+	struct xe_gt *gt;
+	int total_num_eus = 0;
+	u8 id;
+
+	for_each_gt(gt, xe, id) {
+		xe_dss_mask_t all_dss;
+		int num_dss, num_eus;
+
+		bitmap_or(all_dss, gt->fuse_topo.g_dss_mask,
+			  gt->fuse_topo.c_dss_mask, XE_MAX_DSS_FUSE_BITS);
+
+		num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
+		num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
+					XE_MAX_EU_FUSE_BITS) * num_dss;
+
+		total_num_eus += num_eus;
+	}
+
+	xe_assert(xe, total_num_eus);
+
+	/*
+	 * User can issue separate page faults per EU and per CS.
+	 *
+	 * XXX: Multiplier required as compute UMDs are getting PF queue
+	 * errors without it. Follow up on why this multiplier is required.
+	 */
+#define PF_MULTIPLIER	8
+	pf_queue->size = (total_num_eus + XE_NUM_HW_ENGINES) *
+		xe_pagefault_entry_size() * PF_MULTIPLIER;
+	pf_queue->size = roundup_pow_of_two(pf_queue->size);
+#undef PF_MULTIPLIER
+
+	drm_dbg(&xe->drm, "xe_pagefault_entry_size=%d, total_num_eus=%d, pf_queue->size=%u",
+		xe_pagefault_entry_size(), total_num_eus, pf_queue->size);
+
+	spin_lock_init(&pf_queue->lock);
+	INIT_WORK(&pf_queue->worker, xe_pagefault_queue_work);
+
+	pf_queue->data = drmm_kzalloc(&xe->drm, pf_queue->size, GFP_KERNEL);
+	if (!pf_queue->data)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void xe_pagefault_fini(void *arg)
+{
+	struct xe_device *xe = arg;
+
+	destroy_workqueue(xe->usm.pf_wq);
+}
+
 /**
  * xe_pagefault_init() - Page fault init
  * @xe: xe device instance
@@ -31,8 +105,28 @@
  */
 int xe_pagefault_init(struct xe_device *xe)
 {
-	/* TODO - implement */
-	return 0;
+	int err, i;
+
+	if (!xe->info.has_usm)
+		return 0;
+
+	xe->usm.pf_wq = alloc_workqueue("xe_page_fault_work_queue",
+					WQ_UNBOUND | WQ_HIGHPRI,
+					XE_PAGEFAULT_QUEUE_COUNT);
+	if (!xe->usm.pf_wq)
+		return -ENOMEM;
+
+	for (i = 0; i < XE_PAGEFAULT_QUEUE_COUNT; ++i) {
+		err = xe_pagefault_queue_init(xe, xe->usm.pf_queue + i);
+		if (err)
+			goto err_out;
+	}
+
+	return devm_add_action_or_reset(xe->drm.dev, xe_pagefault_fini, xe);
+
+err_out:
+	destroy_workqueue(xe->usm.pf_wq);
+	return err;
 }
 
 /**
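Note: both the per-entry size and the total queue size are rounded up to powers of two, which turns queue wraparound into a mask. A worked example of the sizing math with made-up numbers (assuming sizeof(struct xe_pagefault) == 24, 512 EUs total, and XE_NUM_HW_ENGINES == 64; none of these values come from this patch), as standalone C:

#include <stdio.h>

/* userspace stand-in for the kernel's roundup_pow_of_two() helper */
static unsigned int roundup_pow_of_two(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int entry = roundup_pow_of_two(24);	/* -> 32 bytes */
	unsigned int size = (512 + 64) * entry * 8;	/* PF_MULTIPLIER == 8 -> 147456 */

	size = roundup_pow_of_two(size);		/* -> 262144 (256 KiB) */

	/* power-of-two size makes wraparound a mask instead of a modulo */
	unsigned int head = 262200 & (size - 1);	/* -> 56 */

	printf("entry=%u size=%u head=%u\n", entry, size, head);
	return 0;
}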