// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>

#include "display/intel_display.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_gem_create.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
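
/*
 * Determine the largest minimum page size across the candidate placements,
 * so that the object size can be rounded up to a value that is valid for
 * every region the object may end up in.
 */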
static u32 object_max_page_size(struct intel_memory_region **placements,
				unsigned int n_placements)
{
	u32 max_page_size = 0;
	int i;

	for (i = 0; i < n_placements; i++) {
		struct intel_memory_region *mr = placements[i];

		GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
		max_page_size = max_t(u32, max_page_size, mr->min_page_size);
	}

	GEM_BUG_ON(!max_page_size);
	return max_page_size;
}
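
/*
 * Record the caller-provided placement list on the object. For the common
 * single-placement case this points directly at the region array owned by
 * i915 instead of allocating a separate copy.
 */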
static int object_set_placements(struct drm_i915_gem_object *obj,
				 struct intel_memory_region **placements,
				 unsigned int n_placements)
{
	struct intel_memory_region **arr;
	unsigned int i;

	GEM_BUG_ON(!n_placements);

	/*
	 * For the common case of one memory region, skip storing an
	 * allocated array and just point at the region directly.
	 */
	if (n_placements == 1) {
		struct intel_memory_region *mr = placements[0];
		struct drm_i915_private *i915 = mr->i915;

		obj->mm.placements = &i915->mm.regions[mr->id];
		obj->mm.n_placements = 1;
	} else {
		arr = kmalloc_array(n_placements,
				    sizeof(struct intel_memory_region *),
				    GFP_KERNEL);
		if (!arr)
			return -ENOMEM;

		for (i = 0; i < n_placements; i++)
			arr[i] = placements[i];

		obj->mm.placements = arr;
		obj->mm.n_placements = n_placements;
	}

	return 0;
}
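
/*
 * Create a userspace handle for the object and report its final size back to
 * the caller. The allocation reference is dropped here: on success the new
 * handle keeps the object alive, on failure the object is freed.
 */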
static int i915_gem_publish(struct drm_i915_gem_object *obj,
			    struct drm_file *file,
			    u64 *size_p,
			    u32 *handle_p)
{
	u64 size = obj->base.size;
	int ret;

	ret = drm_gem_handle_create(file, &obj->base, handle_p);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*size_p = size;
	return 0;
}
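
/*
 * Common backend for user-visible object creation: round the requested size
 * up to a multiple of the largest placement page size, allocate the object,
 * record its placement list and initialise it in the first (highest
 * priority) region.
 */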
static struct drm_i915_gem_object *
__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
				  struct intel_memory_region **placements,
				  unsigned int n_placements,
				  unsigned int ext_flags)
{
	struct intel_memory_region *mr = placements[0];
	struct drm_i915_gem_object *obj;
	unsigned int flags;
	int ret;

	i915_gem_flush_free_objects(i915);

	size = round_up(size, object_max_page_size(placements, n_placements));
	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* For most of the ABI (e.g. mmap) we think in system pages */
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = object_set_placements(obj, placements, n_placements);
	if (ret)
		goto object_free;

	/*
	 * I915_BO_ALLOC_USER will make sure the object is cleared before
	 * any user access.
	 */
	flags = I915_BO_ALLOC_USER;

	ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags);
	if (ret)
		goto object_free;

	GEM_BUG_ON(size != obj->base.size);

	/* Add any flag set by create_ext options */
	obj->flags |= ext_flags;

	trace_i915_gem_object_create(obj);
	return obj;

object_free:
	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}

/**
 * __i915_gem_object_create_user - Creates a new object using the same path as
 * DRM_I915_GEM_CREATE_EXT
 * @i915: i915 private
 * @size: size of the buffer, in bytes
 * @placements: possible placement regions, in priority order
 * @n_placements: number of possible placement regions
 *
 * This function is exposed primarily for selftests and does very little
 * error checking. It is assumed that the set of placement regions has
 * already been verified to be valid.
 *
 * Return: the new object on success, or an ERR_PTR() on failure.
 */
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements)
{
	return __i915_gem_object_create_user_ext(i915, size, placements,
						 n_placements, 0);
}
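
/*
 * i915_gem_dumb_create - implement the DRM "dumb buffer" creation ioctl:
 * derive a pitch and size from the requested width/height/bpp, then allocate
 * the buffer in local memory when the device has it, or in system memory
 * otherwise.
 */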
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	struct drm_i915_gem_object *obj;
	struct intel_memory_region *mr;
	enum intel_memory_type mem_type;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_dumb_fb_max_stride(dev, format,
						   DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	if (args->pitch < args->width)
		return -EINVAL;

	args->size = mul_u32_u32(args->pitch, args->height);

	mem_type = INTEL_MEMORY_SYSTEM;
	if (HAS_LMEM(to_i915(dev)))
		mem_type = INTEL_MEMORY_LOCAL;

	mr = intel_memory_region_by_type(to_i915(dev), mem_type);

	obj = __i915_gem_object_create_user(to_i915(dev), args->size, &mr, 1);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}

/**
 * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Return: 0 on success, negative error code on failure.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create *args = data;
	struct drm_i915_gem_object *obj;
	struct intel_memory_region *mr;

	mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);

	obj = __i915_gem_object_create_user(i915, args->size, &mr, 1);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}
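
/*
 * State accumulated while parsing the extension chain of a
 * DRM_I915_GEM_CREATE_EXT ioctl, before the object itself is created.
 */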
struct create_ext {
	struct drm_i915_private *i915;
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	unsigned int n_placements;
	unsigned int placement_mask;
	unsigned long flags;
	unsigned int pat_index;
};
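
/*
 * Format the placement list into a human readable string for the debug
 * messages emitted when extension parsing fails.
 */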
static void repr_placements(char *buf, size_t size,
			    struct intel_memory_region **placements,
			    int n_placements)
{
	int i;

	buf[0] = '\0';

	for (i = 0; i < n_placements; i++) {
		struct intel_memory_region *mr = placements[i];
		int r;

		r = snprintf(buf, size, "\n %s -> { class: %d, inst: %d }",
			     mr->name, mr->type, mr->instance);
		if (r >= size)
			return;

		buf += r;
		size -= r;
	}
}
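
/*
 * Validate the memory regions supplied through
 * I915_GEM_CREATE_EXT_MEMORY_REGIONS: each region must exist on this device,
 * be visible to userspace and appear at most once, and the extension itself
 * may only be supplied once.
 */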
static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
			  struct create_ext *ext_data)
{
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_memory_class_instance __user *uregions =
		u64_to_user_ptr(args->regions);
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	u32 mask;
	int i, ret = 0;

	if (args->pad) {
		drm_dbg(&i915->drm, "pad should be zero\n");
		ret = -EINVAL;
	}

	if (!args->num_regions) {
		drm_dbg(&i915->drm, "num_regions is zero\n");
		ret = -EINVAL;
	}

	BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
	BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements));
	if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
		drm_dbg(&i915->drm, "num_regions is too large\n");
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	mask = 0;
	for (i = 0; i < args->num_regions; i++) {
		struct drm_i915_gem_memory_class_instance region;
		struct intel_memory_region *mr;

		if (copy_from_user(&region, uregions, sizeof(region)))
			return -EFAULT;

		mr = intel_memory_region_lookup(i915,
						region.memory_class,
						region.memory_instance);
		if (!mr || mr->private) {
			drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
				region.memory_class, region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		if (mask & BIT(mr->id)) {
			drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
				mr->name, region.memory_class,
				region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		placements[i] = mr;
		mask |= BIT(mr->id);

		++uregions;
	}

	if (ext_data->n_placements) {
		ret = -EINVAL;
		goto out_dump;
	}

	ext_data->n_placements = args->num_regions;
	for (i = 0; i < args->num_regions; i++)
		ext_data->placements[i] = placements[i];

	ext_data->placement_mask = mask;
	return 0;

out_dump:
	if (1) {
		char buf[256];

		if (ext_data->n_placements) {
			repr_placements(buf,
					sizeof(buf),
					ext_data->placements,
					ext_data->n_placements);
			drm_dbg(&i915->drm,
				"Placements were already set in previous EXT. Existing placements: %s\n",
				buf);
		}

		repr_placements(buf, sizeof(buf), placements, i);
		drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf);
	}

	return ret;
}
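
/* Extension handler for I915_GEM_CREATE_EXT_MEMORY_REGIONS. */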
static int ext_set_placements(struct i915_user_extension __user *base,
			      void *data)
{
	struct drm_i915_gem_create_ext_memory_regions ext;

	if (copy_from_user(&ext, base, sizeof(ext)))
		return -EFAULT;

	return set_placements(&ext, data);
}
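
/*
 * Extension handler for I915_GEM_CREATE_EXT_PROTECTED_CONTENT: mark the
 * object as protected, which is only valid while PXP is enabled.
 */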
static int ext_set_protected(struct i915_user_extension __user *base, void *data)
{
	struct drm_i915_gem_create_ext_protected_content ext;
	struct create_ext *ext_data = data;

	if (copy_from_user(&ext, base, sizeof(ext)))
		return -EFAULT;

	if (ext.flags)
		return -EINVAL;

	if (!intel_pxp_is_enabled(ext_data->i915->pxp))
		return -ENODEV;

	ext_data->flags |= I915_BO_PROTECTED;

	return 0;
}
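
/*
 * Extension handler for I915_GEM_CREATE_EXT_SET_PAT: let userspace select
 * the PAT index used for this object, supported on Xe_LPG (graphics version
 * 12.70) and newer.
 */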
static int ext_set_pat(struct i915_user_extension __user *base, void *data)
{
	struct create_ext *ext_data = data;
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_create_ext_set_pat ext;
	unsigned int max_pat_index;

	BUILD_BUG_ON(sizeof(struct drm_i915_gem_create_ext_set_pat) !=
		     offsetofend(struct drm_i915_gem_create_ext_set_pat, rsvd));

	/* Limiting the extension only to Xe_LPG and beyond */
	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 70))
		return -ENODEV;

	if (copy_from_user(&ext, base, sizeof(ext)))
		return -EFAULT;

	max_pat_index = INTEL_INFO(i915)->max_pat_index;

	if (ext.pat_index > max_pat_index) {
		drm_dbg(&i915->drm, "PAT index is invalid: %u\n",
			ext.pat_index);
		return -EINVAL;
	}

	ext_data->pat_index = ext.pat_index;

	return 0;
}
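
/* Extension handlers, indexed by the extension id from the uapi header. */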
static const i915_user_extension_fn create_extensions[] = {
	[I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
	[I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
	[I915_GEM_CREATE_EXT_SET_PAT] = ext_set_pat,
};

#define PAT_INDEX_NOT_SET 0xffff
/**
 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Return: 0 on success, negative error code on failure.
 */
int
i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create_ext *args = data;
	struct create_ext ext_data = { .i915 = i915 };
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS)
		return -EINVAL;

	ext_data.pat_index = PAT_INDEX_NOT_SET;
	ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   create_extensions,
				   ARRAY_SIZE(create_extensions),
				   &ext_data);
	if (ret)
		return ret;

	if (!ext_data.n_placements) {
		ext_data.placements[0] =
			intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
		ext_data.n_placements = 1;
	}

	if (args->flags & I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) {
		if (ext_data.n_placements == 1)
			return -EINVAL;

		/*
		 * We always need to be able to spill to system memory, if we
		 * can't place in the mappable part of LMEM.
		 */
		if (!(ext_data.placement_mask & BIT(INTEL_REGION_SMEM)))
			return -EINVAL;
	} else {
		if (ext_data.n_placements > 1 ||
		    ext_data.placements[0]->type != INTEL_MEMORY_SYSTEM)
			ext_data.flags |= I915_BO_ALLOC_GPU_ONLY;
	}

	obj = __i915_gem_object_create_user_ext(i915, args->size,
						ext_data.placements,
						ext_data.n_placements,
						ext_data.flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (ext_data.pat_index != PAT_INDEX_NOT_SET) {
		i915_gem_object_set_pat_index(obj, ext_data.pat_index);
		/* Mark that the pat_index was set by userspace (UMD) */
		obj->pat_set_by_user = true;
	}

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}