1// SPDX-License-Identifier: GPL-2.0 AND MIT
2/*
3 * Copyright © 2023 Intel Corporation
4 */
5
6#include <linux/export.h>
7
8#include <drm/ttm/ttm_tt.h>
9
10#include "ttm_kunit_helpers.h"
11
/* System-memory placement used when evicting back to plain system RAM. */
static const struct ttm_place sys_place = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_FLAG_FALLBACK,
};

/* Placement in the first mock domain; objects evicted here are purged. */
static const struct ttm_place mock1_place = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_MOCK1,
	.flags = TTM_PL_FLAG_FALLBACK,
};

/* Placement in the second mock domain, used as an eviction target for TT. */
static const struct ttm_place mock2_place = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_MOCK2,
	.flags = TTM_PL_FLAG_FALLBACK,
};

/* Single-entry placement lists wrapping the places above. */
static struct ttm_placement sys_placement = {
	.num_placement = 1,
	.placement = &sys_place,
};

/* "Bad" placement handed out by bad_evict_flags() to exercise failure paths. */
static struct ttm_placement bad_placement = {
	.num_placement = 1,
	.placement = &mock1_place,
};

static struct ttm_placement mock_placement = {
	.num_placement = 1,
	.placement = &mock2_place,
};
47
48static struct ttm_tt *ttm_tt_simple_create(struct ttm_buffer_object *bo, u32 page_flags)
49{
50 struct ttm_tt *tt;
51
52 tt = kzalloc(sizeof(*tt), GFP_KERNEL);
53 ttm_tt_init(ttm: tt, bo, page_flags, caching: ttm_cached, extra_pages: 0);
54
55 return tt;
56}
57
/*
 * Counterpart of ttm_tt_simple_create(): free the ttm_tt allocation.
 * NOTE(review): ttm_tt_fini() is not called here — presumably fine for
 * these mock objects, but confirm against the tests' usage.
 */
static void ttm_tt_simple_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	kfree(ttm);
}
62
63static int mock_move(struct ttm_buffer_object *bo, bool evict,
64 struct ttm_operation_ctx *ctx,
65 struct ttm_resource *new_mem,
66 struct ttm_place *hop)
67{
68 struct ttm_resource *old_mem = bo->resource;
69
70 if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm)) {
71 ttm_bo_move_null(bo, new_mem);
72 return 0;
73 }
74
75 if (bo->resource->mem_type == TTM_PL_VRAM &&
76 new_mem->mem_type == TTM_PL_SYSTEM) {
77 hop->mem_type = TTM_PL_TT;
78 hop->flags = TTM_PL_FLAG_TEMPORARY;
79 hop->fpfn = 0;
80 hop->lpfn = 0;
81 return -EMULTIHOP;
82 }
83
84 if ((old_mem->mem_type == TTM_PL_SYSTEM &&
85 new_mem->mem_type == TTM_PL_TT) ||
86 (old_mem->mem_type == TTM_PL_TT &&
87 new_mem->mem_type == TTM_PL_SYSTEM)) {
88 ttm_bo_move_null(bo, new_mem);
89 return 0;
90 }
91
92 return ttm_bo_move_memcpy(bo, ctx, new_mem);
93}
94
95static void mock_evict_flags(struct ttm_buffer_object *bo,
96 struct ttm_placement *placement)
97{
98 switch (bo->resource->mem_type) {
99 case TTM_PL_VRAM:
100 case TTM_PL_SYSTEM:
101 *placement = sys_placement;
102 break;
103 case TTM_PL_TT:
104 *placement = mock_placement;
105 break;
106 case TTM_PL_MOCK1:
107 /* Purge objects coming from this domain */
108 break;
109 }
110}
111
/*
 * Evict-flags callback that always returns the "bad" MOCK1 placement,
 * used by tests exercising eviction failure paths.
 */
static void bad_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = bad_placement;
}
117
118static int ttm_device_kunit_init_with_funcs(struct ttm_test_devices *priv,
119 struct ttm_device *ttm,
120 unsigned int alloc_flags,
121 struct ttm_device_funcs *funcs)
122{
123 struct drm_device *drm = priv->drm;
124 int err;
125
126 err = ttm_device_init(bdev: ttm, funcs, dev: drm->dev,
127 mapping: drm->anon_inode->i_mapping,
128 vma_manager: drm->vma_offset_manager,
129 alloc_flags);
130
131 return err;
132}
133
/* Default mock device-function table used by most TTM KUnit tests. */
struct ttm_device_funcs ttm_dev_funcs = {
	.ttm_tt_create = ttm_tt_simple_create,
	.ttm_tt_destroy = ttm_tt_simple_destroy,
	.move = mock_move,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = mock_evict_flags,
};
EXPORT_SYMBOL_GPL(ttm_dev_funcs);
142
143int ttm_device_kunit_init(struct ttm_test_devices *priv,
144 struct ttm_device *ttm,
145 unsigned int alloc_flags)
146{
147 return ttm_device_kunit_init_with_funcs(priv, ttm, alloc_flags,
148 funcs: &ttm_dev_funcs);
149}
150EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
151
/*
 * Variant of ttm_dev_funcs whose evict_flags always returns an unusable
 * placement, for tests that exercise eviction failure handling.
 */
struct ttm_device_funcs ttm_dev_funcs_bad_evict = {
	.ttm_tt_create = ttm_tt_simple_create,
	.ttm_tt_destroy = ttm_tt_simple_destroy,
	.move = mock_move,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = bad_evict_flags,
};
EXPORT_SYMBOL_GPL(ttm_dev_funcs_bad_evict);
160
161int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
162 struct ttm_device *ttm)
163{
164 return ttm_device_kunit_init_with_funcs(priv, ttm, alloc_flags: 0,
165 funcs: &ttm_dev_funcs_bad_evict);
166}
167EXPORT_SYMBOL_GPL(ttm_device_kunit_init_bad_evict);
168
169struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
170 struct ttm_test_devices *devs,
171 size_t size,
172 struct dma_resv *obj)
173{
174 struct drm_gem_object gem_obj = { };
175 struct ttm_buffer_object *bo;
176 int err;
177
178 bo = kunit_kzalloc(test, size: sizeof(*bo), GFP_KERNEL);
179 KUNIT_ASSERT_NOT_NULL(test, bo);
180
181 bo->base = gem_obj;
182
183 if (obj)
184 bo->base.resv = obj;
185
186 err = drm_gem_object_init(dev: devs->drm, obj: &bo->base, size);
187 KUNIT_ASSERT_EQ(test, err, 0);
188
189 bo->bdev = devs->ttm_dev;
190 bo->destroy = dummy_ttm_bo_destroy;
191
192 kref_init(kref: &bo->kref);
193
194 return bo;
195}
196EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
197
198struct ttm_place *ttm_place_kunit_init(struct kunit *test, u32 mem_type, u32 flags)
199{
200 struct ttm_place *place;
201
202 place = kunit_kzalloc(test, size: sizeof(*place), GFP_KERNEL);
203 KUNIT_ASSERT_NOT_NULL(test, place);
204
205 place->mem_type = mem_type;
206 place->flags = flags;
207
208 return place;
209}
210EXPORT_SYMBOL_GPL(ttm_place_kunit_init);
211
212void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo)
213{
214 drm_gem_object_release(obj: &bo->base);
215}
216EXPORT_SYMBOL_GPL(dummy_ttm_bo_destroy);
217
218struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
219{
220 struct ttm_test_devices *devs;
221
222 devs = kunit_kzalloc(test, size: sizeof(*devs), GFP_KERNEL);
223 KUNIT_ASSERT_NOT_NULL(test, devs);
224
225 devs->dev = drm_kunit_helper_alloc_device(test);
226 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->dev);
227
228 /* Set mask for alloc_coherent mappings to enable ttm_pool_alloc testing */
229 devs->dev->coherent_dma_mask = -1;
230
231 devs->drm = __drm_kunit_helper_alloc_drm_device(test, dev: devs->dev,
232 size: sizeof(*devs->drm), offset: 0,
233 features: DRIVER_GEM);
234 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->drm);
235
236 return devs;
237}
238EXPORT_SYMBOL_GPL(ttm_test_devices_basic);
239
240struct ttm_test_devices *ttm_test_devices_all(struct kunit *test)
241{
242 struct ttm_test_devices *devs;
243 struct ttm_device *ttm_dev;
244 int err;
245
246 devs = ttm_test_devices_basic(test);
247
248 ttm_dev = kunit_kzalloc(test, size: sizeof(*ttm_dev), GFP_KERNEL);
249 KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
250
251 err = ttm_device_kunit_init(devs, ttm_dev, 0);
252 KUNIT_ASSERT_EQ(test, err, 0);
253
254 devs->ttm_dev = ttm_dev;
255
256 return devs;
257}
258EXPORT_SYMBOL_GPL(ttm_test_devices_all);
259
260void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs)
261{
262 if (devs->ttm_dev)
263 ttm_device_fini(bdev: devs->ttm_dev);
264
265 drm_kunit_helper_free_device(test, dev: devs->dev);
266}
267EXPORT_SYMBOL_GPL(ttm_test_devices_put);
268
269int ttm_test_devices_init(struct kunit *test)
270{
271 struct ttm_test_devices *priv;
272
273 priv = kunit_kzalloc(test, size: sizeof(*priv), GFP_KERNEL);
274 KUNIT_ASSERT_NOT_NULL(test, priv);
275
276 priv = ttm_test_devices_basic(test);
277 test->priv = priv;
278
279 return 0;
280}
281EXPORT_SYMBOL_GPL(ttm_test_devices_init);
282
283int ttm_test_devices_all_init(struct kunit *test)
284{
285 struct ttm_test_devices *priv;
286
287 priv = kunit_kzalloc(test, size: sizeof(*priv), GFP_KERNEL);
288 KUNIT_ASSERT_NOT_NULL(test, priv);
289
290 priv = ttm_test_devices_all(test);
291 test->priv = priv;
292
293 return 0;
294}
295EXPORT_SYMBOL_GPL(ttm_test_devices_all_init);
296
/* KUnit exit hook: tear down the test environment stored in test->priv. */
void ttm_test_devices_fini(struct kunit *test)
{
	ttm_test_devices_put(test, test->priv);
}
EXPORT_SYMBOL_GPL(ttm_test_devices_fini);
302
303MODULE_DESCRIPTION("TTM KUnit test helper functions");
304MODULE_LICENSE("GPL and additional rights");
305

/* Source: linux/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c */