drm/ttm: Replace multiple booleans with flags in device init

Multiple consecutive boolean function arguments are usually not very
readable.

Replace the ones in ttm_device_init() with flags, with the additional
benefit of soon being able to pass in more data at just a one-off cost
of code-base churn.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Maxime Ripard <mripard@kernel.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Sui Jingfeng <suijingfeng@loongson.cn>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Thomas Zimmermann <tzimmermann@suse.de>
Cc: Zack Rusin <zack.rusin@broadcom.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Zack Rusin <zack.rusin@broadcom.com>
Acked-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> # For xe
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
Link: https://lore.kernel.org/r/20251020115411.36818-4-tvrtko.ursulin@igalia.com
[tursulin: fixup checkpatch while applying]
This commit is contained in:
Tvrtko Ursulin
2025-10-20 12:54:08 +01:00
committed by Tvrtko Ursulin
parent 0af5b6a8f8
commit 77e19f8d32
16 changed files with 50 additions and 54 deletions

View File

@@ -1930,8 +1930,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
 			       adev_to_drm(adev)->anon_inode->i_mapping,
 			       adev_to_drm(adev)->vma_offset_manager,
-			       adev->need_swiotlb,
-			       dma_addressing_limited(adev->dev));
+			       (adev->need_swiotlb ?
+				TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
+			       (dma_addressing_limited(adev->dev) ?
+				TTM_ALLOCATION_POOL_USE_DMA32 : 0));
 	if (r) {
 		dev_err(adev->dev,
 			"failed initializing buffer object driver(%d).\n", r);

View File

@@ -860,7 +860,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
 	ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev,
 			      dev->anon_inode->i_mapping,
 			      dev->vma_offset_manager,
-			      false, true);
+			      TTM_ALLOCATION_POOL_USE_DMA32);
 	if (ret)
 		return ret;

View File

@@ -34,7 +34,7 @@ int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
 	return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
 			       drm->dev, drm->anon_inode->i_mapping,
-			       drm->vma_offset_manager, false, false);
+			       drm->vma_offset_manager, 0);
 }

 /**

View File

@@ -545,7 +545,8 @@ int lsdc_ttm_init(struct lsdc_device *ldev)
 	ret = ttm_device_init(&ldev->bdev, &lsdc_bo_driver, ddev->dev,
 			      ddev->anon_inode->i_mapping,
-			      ddev->vma_offset_manager, false, true);
+			      ddev->vma_offset_manager,
+			      TTM_ALLOCATION_POOL_USE_DMA32);
 	if (ret)
 		return ret;

View File

@@ -302,8 +302,10 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 	ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
 				  dev->anon_inode->i_mapping,
 				  dev->vma_offset_manager,
-				  drm_need_swiotlb(drm->client.mmu.dmabits),
-				  drm->client.mmu.dmabits <= 32);
+				  (drm_need_swiotlb(drm->client.mmu.dmabits) ?
+				   TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
+				  (drm->client.mmu.dmabits <= 32 ?
+				   TTM_ALLOCATION_POOL_USE_DMA32 : 0));
 	if (ret) {
 		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
 		return ret;

View File

@@ -197,7 +197,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
 	r = ttm_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
 			    qdev->ddev.anon_inode->i_mapping,
 			    qdev->ddev.vma_offset_manager,
-			    false, false);
+			    0);
 	if (r) {
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
 		return r;

View File

@@ -683,8 +683,10 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
 			       rdev_to_drm(rdev)->anon_inode->i_mapping,
 			       rdev_to_drm(rdev)->vma_offset_manager,
-			       rdev->need_swiotlb,
-			       dma_addressing_limited(&rdev->pdev->dev));
+			       (rdev->need_swiotlb ?
+				TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
+			       (dma_addressing_limited(&rdev->pdev->dev) ?
+				TTM_ALLOCATION_POOL_USE_DMA32 : 0));
 	if (r) {
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
 		return r;

View File

@@ -251,7 +251,7 @@ static void ttm_bo_unreserve_basic(struct kunit *test)
 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

-	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+	err = ttm_device_kunit_init(priv, ttm_dev, 0);
 	KUNIT_ASSERT_EQ(test, err, 0);
 	priv->ttm_dev = ttm_dev;

@@ -290,7 +290,7 @@ static void ttm_bo_unreserve_pinned(struct kunit *test)
 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

-	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+	err = ttm_device_kunit_init(priv, ttm_dev, 0);
 	KUNIT_ASSERT_EQ(test, err, 0);
 	priv->ttm_dev = ttm_dev;

@@ -342,7 +342,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
 	resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL);
 	KUNIT_ASSERT_NOT_NULL(test, resv);

-	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+	err = ttm_device_kunit_init(priv, ttm_dev, 0);
 	KUNIT_ASSERT_EQ(test, err, 0);
 	priv->ttm_dev = ttm_dev;

@@ -394,7 +394,7 @@ static void ttm_bo_fini_basic(struct kunit *test)
 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

-	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+	err = ttm_device_kunit_init(priv, ttm_dev, 0);
 	KUNIT_ASSERT_EQ(test, err, 0);
 	priv->ttm_dev = ttm_dev;

@@ -437,7 +437,7 @@ static void ttm_bo_fini_shared_resv(struct kunit *test)
 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

-	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+	err = ttm_device_kunit_init(priv, ttm_dev, 0);
 	KUNIT_ASSERT_EQ(test, err, 0);
 	priv->ttm_dev = ttm_dev;

@@ -477,7 +477,7 @@ static void ttm_bo_pin_basic(struct kunit *test)
 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

-	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+	err = ttm_device_kunit_init(priv, ttm_dev, 0);
 	KUNIT_ASSERT_EQ(test, err, 0);
 	priv->ttm_dev = ttm_dev;

@@ -512,7 +512,7 @@ static void ttm_bo_pin_unpin_resource(struct kunit *test)
 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

-	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+	err = ttm_device_kunit_init(priv, ttm_dev, 0);
 	KUNIT_ASSERT_EQ(test, err, 0);
 	priv->ttm_dev = ttm_dev;

@@ -563,7 +563,7 @@ static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

-	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+	err = ttm_device_kunit_init(priv, ttm_dev, 0);
 	KUNIT_ASSERT_EQ(test, err, 0);
 	priv->ttm_dev = ttm_dev;

View File

@@ -995,7 +995,7 @@ static void ttm_bo_validate_busy_domain_evict(struct kunit *test)
 	 */
 	ttm_device_fini(priv->ttm_dev);

-	err = ttm_device_kunit_init_bad_evict(test->priv, priv->ttm_dev, false, false);
+	err = ttm_device_kunit_init_bad_evict(test->priv, priv->ttm_dev);
 	KUNIT_ASSERT_EQ(test, err, 0);
 	ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);

View File

@@ -25,7 +25,7 @@ static void ttm_device_init_basic(struct kunit *test)
 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

-	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+	err = ttm_device_kunit_init(priv, ttm_dev, 0);
 	KUNIT_ASSERT_EQ(test, err, 0);

 	KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs);

@@ -55,7 +55,7 @@ static void ttm_device_init_multiple(struct kunit *test)
 	KUNIT_ASSERT_NOT_NULL(test, ttm_devs);

 	for (i = 0; i < num_dev; i++) {
-		err = ttm_device_kunit_init(priv, &ttm_devs[i], false, false);
+		err = ttm_device_kunit_init(priv, &ttm_devs[i], 0);
 		KUNIT_ASSERT_EQ(test, err, 0);

 		KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping,

@@ -81,7 +81,7 @@ static void ttm_device_fini_basic(struct kunit *test)
 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

-	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+	err = ttm_device_kunit_init(priv, ttm_dev, 0);
 	KUNIT_ASSERT_EQ(test, err, 0);

 	man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM);

@@ -109,7 +109,7 @@ static void ttm_device_init_no_vma_man(struct kunit *test)
 	vma_man = drm->vma_offset_manager;
 	drm->vma_offset_manager = NULL;

-	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+	err = ttm_device_kunit_init(priv, ttm_dev, 0);
 	KUNIT_EXPECT_EQ(test, err, -EINVAL);

 	/* Bring the manager back for a graceful cleanup */

@@ -158,9 +158,7 @@ static void ttm_device_init_pools(struct kunit *test)
 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

-	err = ttm_device_kunit_init(priv, ttm_dev,
-				    params->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
-				    params->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA32);
+	err = ttm_device_kunit_init(priv, ttm_dev, params->alloc_flags);
 	KUNIT_ASSERT_EQ(test, err, 0);

 	pool = &ttm_dev->pool;

View File

@@ -117,8 +117,7 @@ static void bad_evict_flags(struct ttm_buffer_object *bo,

 static int ttm_device_kunit_init_with_funcs(struct ttm_test_devices *priv,
 					    struct ttm_device *ttm,
-					    bool use_dma_alloc,
-					    bool use_dma32,
+					    unsigned int alloc_flags,
 					    struct ttm_device_funcs *funcs)
 {
 	struct drm_device *drm = priv->drm;

@@ -127,7 +126,7 @@ static int ttm_device_kunit_init_with_funcs(struct ttm_test_devices *priv,
 	err = ttm_device_init(ttm, funcs, drm->dev,
 			      drm->anon_inode->i_mapping,
 			      drm->vma_offset_manager,
-			      use_dma_alloc, use_dma32);
+			      alloc_flags);

 	return err;
 }

@@ -143,11 +142,10 @@ EXPORT_SYMBOL_GPL(ttm_dev_funcs);

 int ttm_device_kunit_init(struct ttm_test_devices *priv,
 			  struct ttm_device *ttm,
-			  bool use_dma_alloc,
-			  bool use_dma32)
+			  unsigned int alloc_flags)
 {
-	return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc,
-						use_dma32, &ttm_dev_funcs);
+	return ttm_device_kunit_init_with_funcs(priv, ttm, alloc_flags,
+						&ttm_dev_funcs);
 }
 EXPORT_SYMBOL_GPL(ttm_device_kunit_init);

@@ -161,12 +159,10 @@ struct ttm_device_funcs ttm_dev_funcs_bad_evict = {
 EXPORT_SYMBOL_GPL(ttm_dev_funcs_bad_evict);

 int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
-				    struct ttm_device *ttm,
-				    bool use_dma_alloc,
-				    bool use_dma32)
+				    struct ttm_device *ttm)
 {
-	return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc,
-						use_dma32, &ttm_dev_funcs_bad_evict);
+	return ttm_device_kunit_init_with_funcs(priv, ttm, 0,
+						&ttm_dev_funcs_bad_evict);
 }
 EXPORT_SYMBOL_GPL(ttm_device_kunit_init_bad_evict);

@@ -252,7 +248,7 @@ struct ttm_test_devices *ttm_test_devices_all(struct kunit *test)
 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

-	err = ttm_device_kunit_init(devs, ttm_dev, false, false);
+	err = ttm_device_kunit_init(devs, ttm_dev, 0);
 	KUNIT_ASSERT_EQ(test, err, 0);
 	devs->ttm_dev = ttm_dev;

View File

@@ -28,12 +28,9 @@ struct ttm_test_devices {
 /* Building blocks for test-specific init functions */
 int ttm_device_kunit_init(struct ttm_test_devices *priv,
 			  struct ttm_device *ttm,
-			  bool use_dma_alloc,
-			  bool use_dma32);
+			  unsigned int alloc_flags);
 int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
-				    struct ttm_device *ttm,
-				    bool use_dma_alloc,
-				    bool use_dma32);
+				    struct ttm_device *ttm);
 struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
 					    struct ttm_test_devices *devs,
 					    size_t size,

View File

@@ -199,8 +199,7 @@ EXPORT_SYMBOL(ttm_device_swapout);
  * @dev: The core kernel device pointer for DMA mappings and allocations.
  * @mapping: The address space to use for this bo.
  * @vma_manager: A pointer to a vma manager.
- * @use_dma_alloc: If coherent DMA allocation API should be used.
- * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
+ * @alloc_flags: TTM_ALLOCATION_ flags.
  *
  * Initializes a struct ttm_device:
  * Returns:

@@ -209,7 +208,7 @@ EXPORT_SYMBOL(ttm_device_swapout);
 int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
 		    struct device *dev, struct address_space *mapping,
 		    struct drm_vma_offset_manager *vma_manager,
-		    bool use_dma_alloc, bool use_dma32)
+		    unsigned int alloc_flags)
 {
 	struct ttm_global *glob = &ttm_glob;
 	int ret, nid;

@@ -237,9 +236,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
 	else
 		nid = NUMA_NO_NODE;

-	ttm_pool_init(&bdev->pool, dev, nid,
-		      (use_dma_alloc ? TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
-		      (use_dma32 ? TTM_ALLOCATION_POOL_USE_DMA32 : 0));
+	ttm_pool_init(&bdev->pool, dev, nid, alloc_flags);

 	bdev->vma_manager = vma_manager;
 	spin_lock_init(&bdev->lru_lock);

View File

@@ -1023,8 +1023,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
 			      dev_priv->drm.dev,
 			      dev_priv->drm.anon_inode->i_mapping,
 			      dev_priv->drm.vma_offset_manager,
-			      dev_priv->map_mode == vmw_dma_alloc_coherent,
-			      false);
+			      (dev_priv->map_mode == vmw_dma_alloc_coherent) ?
+			      TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0);
 	if (unlikely(ret != 0)) {
 		drm_err(&dev_priv->drm,
 			"Failed initializing TTM buffer object driver.\n");

View File

@@ -437,7 +437,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
 	err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
 			      xe->drm.anon_inode->i_mapping,
-			      xe->drm.vma_offset_manager, false, false);
+			      xe->drm.vma_offset_manager, 0);
 	if (WARN_ON(err))
 		goto err;

View File

@@ -27,6 +27,7 @@
 #include <linux/types.h>
 #include <linux/workqueue.h>

+#include <drm/ttm/ttm_allocation.h>
 #include <drm/ttm/ttm_resource.h>
 #include <drm/ttm/ttm_pool.h>

@@ -292,7 +293,7 @@ static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
 int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
 		    struct device *dev, struct address_space *mapping,
 		    struct drm_vma_offset_manager *vma_manager,
-		    bool use_dma_alloc, bool use_dma32);
+		    unsigned int alloc_flags);
 void ttm_device_fini(struct ttm_device *bdev);
 void ttm_device_clear_dma_mappings(struct ttm_device *bdev);