drm/ttm: Replace multiple booleans with flags in pool init

Multiple consecutive boolean function arguments are usually not very
readable.

Replace the ones in ttm_pool_init() with a single flags argument, with
the additional benefit that more data can soon be passed in while paying
the code base churning cost only once.
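
At a call site the conversion looks like this (all names taken from the
patch below):

	/* Before: two adjacent booleans, easy to transpose. */
	ttm_pool_init(pool, dev, NUMA_NO_NODE, true, false);

	/* After: self-describing flags. */
	ttm_pool_init(pool, dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);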

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
Link: https://lore.kernel.org/r/20251020115411.36818-3-tvrtko.ursulin@igalia.com
commit 0af5b6a8f8 (parent d53adc244f)
Author:    Tvrtko Ursulin
Date:      2025-10-20 12:54:07 +01:00
Committer: Tvrtko Ursulin

8 changed files with 45 additions and 42 deletions

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -1837,7 +1837,7 @@ static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
adev->gmc.mem_partitions[i].numa.node,
-false, false);
+0);
}
return 0;
}

drivers/gpu/drm/ttm/tests/ttm_device_test.c

@@ -7,11 +7,11 @@
#include <drm/ttm/ttm_placement.h>
#include "ttm_kunit_helpers.h"
#include "../ttm_pool_internal.h"
struct ttm_device_test_case {
const char *description;
-bool use_dma_alloc;
-bool use_dma32;
+unsigned int alloc_flags;
bool pools_init_expected;
};
@@ -119,26 +119,22 @@ static void ttm_device_init_no_vma_man(struct kunit *test)
static const struct ttm_device_test_case ttm_device_cases[] = {
{
.description = "No DMA allocations, no DMA32 required",
-.use_dma_alloc = false,
-.use_dma32 = false,
.pools_init_expected = false,
},
{
.description = "DMA allocations, DMA32 required",
-.use_dma_alloc = true,
-.use_dma32 = true,
+.alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC |
+TTM_ALLOCATION_POOL_USE_DMA32,
.pools_init_expected = true,
},
{
.description = "No DMA allocations, DMA32 required",
-.use_dma_alloc = false,
-.use_dma32 = true,
+.alloc_flags = TTM_ALLOCATION_POOL_USE_DMA32,
.pools_init_expected = false,
},
{
.description = "DMA allocations, no DMA32 required",
-.use_dma_alloc = true,
-.use_dma32 = false,
+.alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
.pools_init_expected = true,
},
};
@@ -163,15 +159,14 @@ static void ttm_device_init_pools(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
err = ttm_device_kunit_init(priv, ttm_dev,
-params->use_dma_alloc,
-params->use_dma32);
+params->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
+params->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA32);
KUNIT_ASSERT_EQ(test, err, 0);
pool = &ttm_dev->pool;
KUNIT_ASSERT_NOT_NULL(test, pool);
KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev);
-KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
-KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32);
+KUNIT_EXPECT_EQ(test, pool->alloc_flags, params->alloc_flags);
if (params->pools_init_expected) {
for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
@@ -181,7 +176,7 @@ static void ttm_device_init_pools(struct kunit *test)
KUNIT_EXPECT_EQ(test, pt.caching, i);
KUNIT_EXPECT_EQ(test, pt.order, j);
-if (params->use_dma_alloc)
+if (ttm_pool_uses_dma_alloc(pool))
KUNIT_ASSERT_FALSE(test,
list_empty(&pt.pages));
}

drivers/gpu/drm/ttm/tests/ttm_pool_test.c

@@ -13,7 +13,7 @@
struct ttm_pool_test_case {
const char *description;
unsigned int order;
-bool use_dma_alloc;
+unsigned int alloc_flags;
};
struct ttm_pool_test_priv {
@@ -87,7 +87,7 @@ static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
-ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -114,12 +114,12 @@ static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
{
.description = "One page, with coherent DMA mappings enabled",
.order = 0,
-.use_dma_alloc = true,
+.alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
},
{
.description = "Above the allocation limit, with coherent DMA mappings enabled",
.order = MAX_PAGE_ORDER + 1,
-.use_dma_alloc = true,
+.alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
},
};
@@ -151,13 +151,11 @@ static void ttm_pool_alloc_basic(struct kunit *test)
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
-ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
-false);
+ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->alloc_flags);
KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
-KUNIT_ASSERT_EQ(test, ttm_pool_uses_dma_alloc(pool),
-params->use_dma_alloc);
+KUNIT_ASSERT_EQ(test, pool->alloc_flags, params->alloc_flags);
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -167,14 +165,14 @@ static void ttm_pool_alloc_basic(struct kunit *test)
last_page = tt->pages[tt->num_pages - 1];
if (params->order <= MAX_PAGE_ORDER) {
-if (params->use_dma_alloc) {
+if (ttm_pool_uses_dma_alloc(pool)) {
KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
} else {
KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
}
} else {
-if (params->use_dma_alloc) {
+if (ttm_pool_uses_dma_alloc(pool)) {
KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
KUNIT_ASSERT_NULL(test, (void *)last_page->private);
} else {
@@ -220,7 +218,7 @@ static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
-ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -350,7 +348,7 @@ static void ttm_pool_free_dma_alloc(struct kunit *test)
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
-ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
ttm_pool_alloc(pool, tt, &simple_ctx);
pt = &pool->caching[caching].orders[order];
@@ -381,7 +379,7 @@ static void ttm_pool_free_no_dma_alloc(struct kunit *test)
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
-ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
+ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, 0);
ttm_pool_alloc(pool, tt, &simple_ctx);
pt = &pool->caching[caching].orders[order];

drivers/gpu/drm/ttm/ttm_device.c

@@ -31,6 +31,7 @@
#include <linux/export.h>
#include <linux/mm.h>
+#include <drm/ttm/ttm_allocation.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
@@ -236,7 +237,9 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
else
nid = NUMA_NO_NODE;
-ttm_pool_init(&bdev->pool, dev, nid, use_dma_alloc, use_dma32);
+ttm_pool_init(&bdev->pool, dev, nid,
+(use_dma_alloc ? TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
+(use_dma32 ? TTM_ALLOCATION_POOL_USE_DMA32 : 0));
bdev->vma_manager = vma_manager;
spin_lock_init(&bdev->lru_lock);

drivers/gpu/drm/ttm/ttm_pool.c

@@ -1059,13 +1059,12 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
* @pool: the pool to initialize
* @dev: device for DMA allocations and mappings
* @nid: NUMA node to use for allocations
-* @use_dma_alloc: true if coherent DMA alloc should be used
-* @use_dma32: true if GFP_DMA32 should be used
+* @alloc_flags: TTM_ALLOCATION_POOL_ flags
*
* Initialize the pool and its pool types.
*/
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
-int nid, bool use_dma_alloc, bool use_dma32)
+int nid, unsigned int alloc_flags)
{
unsigned int i, j;
@@ -1073,8 +1072,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
pool->dev = dev;
pool->nid = nid;
-pool->use_dma_alloc = use_dma_alloc;
-pool->use_dma32 = use_dma32;
+pool->alloc_flags = alloc_flags;
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (j = 0; j < NR_PAGE_ORDERS; ++j) {

drivers/gpu/drm/ttm/ttm_pool_internal.h

@@ -4,16 +4,17 @@
#ifndef _TTM_POOL_INTERNAL_H_
#define _TTM_POOL_INTERNAL_H_
+#include <drm/ttm/ttm_allocation.h>
#include <drm/ttm/ttm_pool.h>
static inline bool ttm_pool_uses_dma_alloc(struct ttm_pool *pool)
{
-return pool->use_dma_alloc;
+return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA_ALLOC;
}
static inline bool ttm_pool_uses_dma32(struct ttm_pool *pool)
{
-return pool->use_dma32;
+return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA32;
}
#endif
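
Call sites are expected to query capabilities through these helpers rather
than testing pool->alloc_flags directly. A minimal sketch of the pattern;
ttm_pool_example_gfp() is hypothetical and not part of this patch:

	static inline gfp_t ttm_pool_example_gfp(struct ttm_pool *pool)
	{
		gfp_t gfp = GFP_USER;

		/* Only request DMA32 pages when the pool was created with the flag. */
		if (ttm_pool_uses_dma32(pool))
			gfp |= GFP_DMA32;

		return gfp;
	}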

include/drm/ttm/ttm_allocation.h (new file)

@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2025 Valve Corporation */
+#ifndef _TTM_ALLOCATION_H_
+#define _TTM_ALLOCATION_H_
+
+#include <linux/bits.h>
+
+#define TTM_ALLOCATION_POOL_USE_DMA_ALLOC BIT(0) /* Use coherent DMA allocations. */
+#define TTM_ALLOCATION_POOL_USE_DMA32 BIT(1) /* Use GFP_DMA32 allocations. */
+#endif
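
This layout is what pays off the "one code base churn" from the commit
message: a later patch can hand more information to ttm_pool_init() by
defining another bit, with no further signature change. Hypothetical
illustration only; TTM_ALLOCATION_POOL_EXAMPLE is not part of this patch:

	#define TTM_ALLOCATION_POOL_EXAMPLE BIT(2) /* hypothetical future flag */

	ttm_pool_init(pool, dev, nid,
		      TTM_ALLOCATION_POOL_USE_DMA32 | TTM_ALLOCATION_POOL_EXAMPLE);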

include/drm/ttm/ttm_pool.h

@@ -64,16 +64,14 @@ struct ttm_pool_type {
*
* @dev: the device we allocate pages for
* @nid: which numa node to use
-* @use_dma_alloc: if coherent DMA allocations should be used
-* @use_dma32: if GFP_DMA32 should be used
+* @alloc_flags: TTM_ALLOCATION_POOL_ flags
* @caching: pools for each caching/order
*/
struct ttm_pool {
struct device *dev;
int nid;
-bool use_dma_alloc;
-bool use_dma32;
+unsigned int alloc_flags;
struct {
struct ttm_pool_type orders[NR_PAGE_ORDERS];
@@ -85,7 +83,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt);
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
-int nid, bool use_dma_alloc, bool use_dma32);
+int nid, unsigned int alloc_flags);
void ttm_pool_fini(struct ttm_pool *pool);
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);