Enable MIGRATE_VMA_SELECT_COMPOUND support in nouveau driver to take advantage of THP zone device migration capabilities. Update migration and eviction code paths to handle compound page sizes appropriately, improving memory bandwidth utilization and reducing migration overhead for large GPU memory allocations.

[balbirs@nvidia.com: fix sparse error]
Link: https://lkml.kernel.org/r/20251115003333.3516870-1-balbirs@nvidia.com
Link: https://lkml.kernel.org/r/20251001065707.920170-17-balbirs@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
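Below is a minimal, illustrative sketch of the migrate_vma flow this change builds on, with the MIGRATE_VMA_SELECT_COMPOUND selector from the series above ORed into the selection flags. The function name sketch_migrate_range, its parameters, and the omitted destination-allocation step are assumptions for illustration only, not the driver's actual migration code.

/*
 * Illustrative sketch only (not nouveau's real migration path): a caller
 * opting in to compound-page selection so migrate_vma_setup() may collect
 * THP-sized source pages. MIGRATE_VMA_SELECT_COMPOUND comes from the series
 * referenced in the commit message; everything else is the long-standing
 * migrate_vma API.
 */
#include <linux/migrate.h>
#include <linux/mm.h>

static int sketch_migrate_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				unsigned long *src, unsigned long *dst,
				void *pgmap_owner)
{
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		/* src/dst are caller-allocated, (end - start) >> PAGE_SHIFT entries each */
		.src		= src,
		.dst		= dst,
		.pgmap_owner	= pgmap_owner,
		/* Select system pages as before, and also allow compound (THP) pages. */
		.flags		= MIGRATE_VMA_SELECT_SYSTEM |
				  MIGRATE_VMA_SELECT_COMPOUND,
	};
	int ret;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/*
	 * Allocate device memory sized to match each collected source entry
	 * and fill args.dst accordingly (omitted in this sketch).
	 */

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}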
66 lines
2.0 KiB
C
#ifndef __NOUVEAU_SVM_H__
#define __NOUVEAU_SVM_H__
#include <nvif/os.h>
#include <linux/mmu_notifier.h>
struct drm_device;
struct drm_file;
struct nouveau_drm;

struct nouveau_svmm {
	struct mmu_notifier notifier;
	struct nouveau_vmm *vmm;
	struct {
		unsigned long start;
		unsigned long limit;
	} unmanaged;

	struct mutex mutex;
};

#if IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM)
void nouveau_svm_init(struct nouveau_drm *);
void nouveau_svm_fini(struct nouveau_drm *);
void nouveau_svm_suspend(struct nouveau_drm *);
void nouveau_svm_resume(struct nouveau_drm *);

int nouveau_svmm_init(struct drm_device *, void *, struct drm_file *);
void nouveau_svmm_fini(struct nouveau_svmm **);
int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);

void nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit);
u64 *nouveau_pfns_alloc(unsigned long npages);
void nouveau_pfns_free(u64 *pfns);
void nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
		      unsigned long addr, u64 *pfns, unsigned long npages,
		      unsigned int page_shift);
#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
static inline void nouveau_svm_suspend(struct nouveau_drm *drm) {}
static inline void nouveau_svm_resume(struct nouveau_drm *drm) {}

static inline int nouveau_svmm_init(struct drm_device *device, void *p,
				    struct drm_file *file)
{
	return -ENOSYS;
}

static inline void nouveau_svmm_fini(struct nouveau_svmm **svmmp) {}

static inline int nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
	return 0;
}

static inline void nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst) {}

static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
				    struct drm_file *file)
{
	return -ENOSYS;
}
#endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
#endif
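For orientation, a hedged usage sketch of the PFN helpers declared above. The wrapper name sketch_map_migrated, the is_thp flag, and the choice of PMD_SHIFT for a THP-backed range are assumptions consistent with the compound-page support described in the commit message, not code taken from the driver.

/*
 * Sketch of how the helpers above might be driven once a range has been
 * migrated. 'svmm' and 'mm' come from the caller's context; passing
 * PMD_SHIFT for a THP-backed range (vs. PAGE_SHIFT for base pages) is an
 * assumption based on the commit message, not the driver's actual code.
 */
#include <linux/mm.h>
#include "nouveau_svm.h"

static void sketch_map_migrated(struct nouveau_svmm *svmm, struct mm_struct *mm,
				unsigned long addr, unsigned long npages,
				bool is_thp)
{
	u64 *pfns = nouveau_pfns_alloc(npages);

	if (!pfns)
		return;

	/* ... fill pfns[] from the migrate_vma destination entries ... */

	nouveau_pfns_map(svmm, mm, addr, pfns, npages,
			 is_thp ? PMD_SHIFT : PAGE_SHIFT);
	nouveau_pfns_free(pfns);
}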