drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages
Update amdgpu_ttm_tt_get_user_pages() and all dependent functions, along with their callers, to use a caller-allocated hmm_range buffer instead of having the HMM layer allocate the buffer. This is needed to make the hmm_range pointers easily accessible without going through the BO, which is a requirement for the userqueue code to lock userptrs effectively.

Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
committed by: Alex Deucher
parent: 079ae5118e
commit: e095b55155
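At a glance, the patch moves allocation of the struct hmm_range out of the HMM helpers and into the callers. A condensed before/after sketch of the calling convention (not part of the patch; the variable names and the release_object label are taken from the amdgpu_gem_userptr_ioctl() hunk below):

	struct hmm_range *range;
	int r;

	/* Before: the helper took a double pointer and allocated the range itself. */
	r = amdgpu_ttm_tt_get_user_pages(bo, &range);
	if (r)
		goto release_object;

	/* After: the caller allocates the buffer, passes it in, and frees it on failure. */
	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	r = amdgpu_ttm_tt_get_user_pages(bo, range);
	if (r) {
		kfree(range);
		goto release_object;
	}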
@@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
 		return 0;
 	}
 
-	ret = amdgpu_ttm_tt_get_user_pages(bo, &range);
+	range = kzalloc(sizeof(*range), GFP_KERNEL);
+	if (unlikely(!range)) {
+		ret = -ENOMEM;
+		goto unregister_out;
+	}
+
+	ret = amdgpu_ttm_tt_get_user_pages(bo, range);
 	if (ret) {
+		kfree(range);
 		if (ret == -EAGAIN)
 			pr_debug("Failed to get user pages, try again\n");
 		else
@@ -2566,9 +2573,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 			}
 		}
 
+		mem->range = kzalloc(sizeof(*mem->range), GFP_KERNEL);
+		if (unlikely(!mem->range))
+			return -ENOMEM;
 		/* Get updated user pages */
-		ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range);
+		ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range);
 		if (ret) {
+			kfree(mem->range);
+			mem->range = NULL;
 			pr_debug("Failed %d to get user pages\n", ret);
 
 			/* Return -EFAULT bad address error as success. It will
@@ -891,7 +891,11 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		bool userpage_invalidated = false;
 		struct amdgpu_bo *bo = e->bo;
 
-		r = amdgpu_ttm_tt_get_user_pages(bo, &e->range);
+		e->range = kzalloc(sizeof(*e->range), GFP_KERNEL);
+		if (unlikely(!e->range))
+			return -ENOMEM;
+
+		r = amdgpu_ttm_tt_get_user_pages(bo, e->range);
 		if (r)
 			goto out_free_user_pages;
 
@@ -572,10 +572,14 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 		goto release_object;
 
 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
-		r = amdgpu_ttm_tt_get_user_pages(bo, &range);
-		if (r)
+		range = kzalloc(sizeof(*range), GFP_KERNEL);
+		if (unlikely(!range))
+			return -ENOMEM;
+		r = amdgpu_ttm_tt_get_user_pages(bo, range);
+		if (r) {
+			kfree(range);
 			goto release_object;
-
+		}
 		r = amdgpu_bo_reserve(bo, true);
 		if (r)
 			goto user_pages_done;
@@ -168,18 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
			       uint64_t start, uint64_t npages, bool readonly,
			       void *owner,
-			       struct hmm_range **phmm_range)
+			       struct hmm_range *hmm_range)
 {
-	struct hmm_range *hmm_range;
 	unsigned long end;
 	unsigned long timeout;
 	unsigned long *pfns;
 	int r = 0;
 
-	hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
-	if (unlikely(!hmm_range))
-		return -ENOMEM;
-
 	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
 	if (unlikely(!pfns)) {
 		r = -ENOMEM;
@@ -221,15 +216,11 @@ retry:
 	hmm_range->start = start;
 	hmm_range->hmm_pfns = pfns;
 
-	*phmm_range = hmm_range;
-
 	return 0;
 
 out_free_pfns:
 	kvfree(pfns);
-out_free_range:
-	kfree(hmm_range);
 
 	if (r == -EBUSY)
 		r = -EAGAIN;
 	return r;
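With the new signature, amdgpu_hmm_range_get_pages() no longer allocates or frees the struct hmm_range; it only fills in the caller-supplied buffer (the hmm_pfns array is still allocated internally with kvmalloc_array()). A minimal sketch of the resulting call pattern, modeled on the svm_range_validate_and_map() hunk later in this patch; the error handling is condensed and the kzalloc failure check is an illustrative addition, not part of the patch:

	struct hmm_range *hmm_range;
	int r;

	hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
	if (unlikely(!hmm_range))
		return -ENOMEM;

	r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
				       readonly, owner, hmm_range);
	if (r) {
		kfree(hmm_range);	/* the helper no longer frees the buffer on failure */
		return r;
	}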
@@ -34,7 +34,7 @@
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
			       uint64_t start, uint64_t npages, bool readonly,
			       void *owner,
-			       struct hmm_range **phmm_range);
+			       struct hmm_range *hmm_range);
 bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
 
 #if defined(CONFIG_HMM_MIRROR)
@@ -706,10 +706,11 @@ struct amdgpu_ttm_tt {
  * memory and start HMM tracking CPU page table update
  *
  * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
- * once afterwards to stop HMM tracking
+ * once afterwards to stop HMM tracking. Its the caller responsibility to ensure
+ * that range is a valid memory and it is freed too.
  */
 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
-				 struct hmm_range **range)
+				 struct hmm_range *range)
 {
 	struct ttm_tt *ttm = bo->tbo.ttm;
 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@@ -719,9 +720,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
 	bool readonly;
 	int r = 0;
 
-	/* Make sure get_user_pages_done() can cleanup gracefully */
-	*range = NULL;
-
 	mm = bo->notifier.mm;
 	if (unlikely(!mm)) {
 		DRM_DEBUG_DRIVER("BO is not registered?\n");
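The updated comment above amdgpu_ttm_tt_get_user_pages() spells out the new ownership rules: the caller provides the range buffer, calls the get_user_pages_done helper exactly once afterwards to stop HMM tracking, and is also responsible for freeing the buffer. A rough sketch of that lifecycle, pieced together from the prototypes in this patch; the locking, the mapping step, and the -EAGAIN reaction to invalidated pages are illustrative assumptions rather than code from the patch:

	struct hmm_range *range;
	int r;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	r = amdgpu_ttm_tt_get_user_pages(bo, range);	/* starts HMM tracking */
	if (r) {
		kfree(range);
		return r;
	}

	/* ... validate and map the user pages ... */

	/* Stop HMM tracking; returns false if the pages were invalidated meanwhile. */
	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range))
		r = -EAGAIN;

	kfree(range);	/* the caller frees the buffer it allocated */
	return r;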
@@ -192,14 +192,14 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
 
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
-				 struct hmm_range **range);
+				 struct hmm_range *range);
 void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
				      struct hmm_range *range);
 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
				       struct hmm_range *range);
 #else
 static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
-					       struct hmm_range **range)
+					       struct hmm_range *range)
 {
	return -EPERM;
 }
@@ -1737,12 +1737,15 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 		}
 
 		WRITE_ONCE(p->svms.faulting_task, current);
+		hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
 		r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
					       readonly, owner,
-					       &hmm_range);
+					       hmm_range);
 		WRITE_ONCE(p->svms.faulting_task, NULL);
-		if (r)
+		if (r) {
+			kfree(hmm_range);
 			pr_debug("failed %d to get svm range pages\n", r);
+		}
 	} else {
 		r = -EFAULT;
 	}