// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#ifdef CONFIG_ARM_ARCH_TIMER
#include <asm/arch_timer.h>
#endif

#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/time64.h>

#include <drm/drm_auth.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>

#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_gpu.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

/**
 * DOC: user <-> kernel object copy helpers.
 */

/**
 * panthor_set_uobj() - Copy kernel object to user object.
 * @usr_ptr: User pointer.
 * @usr_size: Size of the user object.
 * @min_size: Minimum size for this object.
 * @kern_size: Size of the kernel object.
 * @in: Address of the kernel object to copy.
 *
 * Helper automating kernel -> user object copies.
 *
 * Don't use this function directly, use PANTHOR_UOBJ_SET() instead.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_set_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 kern_size, const void *in)
{
	/* User size shouldn't be smaller than the minimal object size. */
	if (usr_size < min_size)
		return -EINVAL;

	if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_size, kern_size)))
		return -EFAULT;

	/* When the kernel object is smaller than the user object, we fill the gap with
	 * zeros.
	 */
	if (usr_size > kern_size &&
	    clear_user(u64_to_user_ptr(usr_ptr + kern_size), usr_size - kern_size)) {
		return -EFAULT;
	}

	return 0;
}

/**
 * panthor_get_uobj_array() - Copy a user object array into a kernel accessible object array.
 * @in: The object array to copy.
 * @min_stride: Minimum array stride.
 * @obj_size: Kernel object size.
 *
 * Helper automating user -> kernel object copies.
 *
 * Don't use this function directly, use PANTHOR_UOBJ_GET_ARRAY() instead.
 *
 * Return: newly allocated object array or an ERR_PTR on error.
 */
static void *
panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
		       u32 obj_size)
{
	int ret = 0;
	void *out_alloc;

	if (!in->count)
		return NULL;

	/* User stride must be at least the minimum object size, otherwise it might
	 * lack useful information.
	 */
	if (in->stride < min_stride)
		return ERR_PTR(-EINVAL);

	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
	if (!out_alloc)
		return ERR_PTR(-ENOMEM);

	if (obj_size == in->stride) {
		/* Fast path when user/kernel have the same uAPI header version. */
		if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
				   (unsigned long)obj_size * in->count))
			ret = -EFAULT;
	} else {
		void __user *in_ptr = u64_to_user_ptr(in->array);
		void *out_ptr = out_alloc;

		/* If the sizes differ, we need to copy elements one by one. */
		for (u32 i = 0; i < in->count; i++) {
			ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
			if (ret)
				break;

			out_ptr += obj_size;
			in_ptr += in->stride;
		}
	}

	if (ret) {
		kvfree(out_alloc);
		return ERR_PTR(ret);
	}

	return out_alloc;
}
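
/*
 * Illustrative sketch (not part of the driver): how the stride-based copy
 * above keeps old userspace working when the kernel-side object grows.
 * Assume a hypothetical object that gained a field in v2 of the uAPI:
 *
 *	struct hypothetical_obj_v1 { __u32 a; __u32 b; };          // stride = 8
 *	struct hypothetical_obj_v2 { __u32 a; __u32 b; __u64 c; }; // obj_size = 16
 *
 * Userspace built against v1 passes stride = 8, so obj_size != in->stride and
 * the slow path runs copy_struct_from_user() per element, which zero-fills
 * the missing trailing bytes (field 'c' ends up 0). Userspace built against
 * v2 passes stride = 16 and takes the single copy_from_user() fast path.
 */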

/**
 * PANTHOR_UOBJ_MIN_SIZE_INTERNAL() - Get the minimum user object size
 * @_typename: Object type.
 * @_last_mandatory_field: Last mandatory field.
 *
 * Get the minimum user object size based on the last mandatory field name,
 * i.e. the name of the last field of the structure at the time this
 * structure was added to the uAPI.
 *
 * Don't use directly, use PANTHOR_UOBJ_DECL() instead.
 */
#define PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \
	(offsetof(_typename, _last_mandatory_field) + \
	 sizeof(((_typename *)NULL)->_last_mandatory_field))

/**
 * PANTHOR_UOBJ_DECL() - Declare a new uAPI object that is subject to
 * evolutions.
 * @_typename: Object type.
 * @_last_mandatory_field: Last mandatory field.
 *
 * Should be used to extend the PANTHOR_UOBJ_MIN_SIZE() list.
 */
#define PANTHOR_UOBJ_DECL(_typename, _last_mandatory_field) \
	_typename : PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field)

/**
 * PANTHOR_UOBJ_MIN_SIZE() - Get the minimum size of a given uAPI object
 * @_obj_name: Object to get the minimum size of.
 *
 * Don't use this macro directly, it's automatically called by
 * PANTHOR_UOBJ_{SET,GET_ARRAY}().
 */
#define PANTHOR_UOBJ_MIN_SIZE(_obj_name) \
	_Generic(_obj_name, \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_gpu_info, tiler_present), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_csif_info, pad), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_timestamp_info, current_timestamp), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_group_priorities_info, pad), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_vm_bind_op, syncs))

/**
 * PANTHOR_UOBJ_SET() - Copy a kernel object to a user object.
 * @_dest_usr_ptr: User pointer to copy to.
 * @_usr_size: Size of the user object.
 * @_src_obj: Kernel object to copy (not a pointer).
 *
 * Return: 0 on success, a negative error code otherwise.
 */
#define PANTHOR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \
	panthor_set_uobj(_dest_usr_ptr, _usr_size, \
			 PANTHOR_UOBJ_MIN_SIZE(_src_obj), \
			 sizeof(_src_obj), &(_src_obj))
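
/*
 * Usage sketch: this is how the DEV_QUERY ioctl below copies a kernel-side
 * info object back to userspace (see panthor_ioctl_dev_query()):
 *
 *	return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->gpu_info);
 *
 * The macro derives the minimum and kernel sizes automatically, so callers
 * only provide the user pointer, the user-supplied size and the object.
 */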

/**
 * PANTHOR_UOBJ_GET_ARRAY() - Copy a user object array to a kernel accessible
 * object array.
 * @_dest_array: Local variable that will hold the newly allocated kernel
 * object array.
 * @_uobj_array: The drm_panthor_obj_array object describing the user object
 * array.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
#define PANTHOR_UOBJ_GET_ARRAY(_dest_array, _uobj_array) \
	({ \
		typeof(_dest_array) _tmp; \
		_tmp = panthor_get_uobj_array(_uobj_array, \
					      PANTHOR_UOBJ_MIN_SIZE((_dest_array)[0]), \
					      sizeof((_dest_array)[0])); \
		if (!IS_ERR(_tmp)) \
			_dest_array = _tmp; \
		PTR_ERR_OR_ZERO(_tmp); \
	})
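
/*
 * Usage sketch, as done in panthor_ioctl_group_submit() below:
 *
 *	struct drm_panthor_queue_submit *jobs_args;
 *
 *	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->queue_submits);
 *
 * On success, jobs_args points to a kvmalloc'd kernel copy of the user
 * array (one element per queue submit) that must be released with kvfree().
 */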

/**
 * struct panthor_sync_signal - Represents a synchronization object point to attach
 * our job fence to.
 *
 * This structure is here to keep track of fences that are currently bound to
 * a specific syncobj point.
 *
 * At the beginning of a job submission, the fence
 * is retrieved from the syncobj itself, and can be NULL if no fence was attached
 * to this point.
 *
 * At the end, it points to the fence of the last job that had a
 * %DRM_PANTHOR_SYNC_OP_SIGNAL on this syncobj.
 *
 * With jobs being submitted in batches, the fence might change several times during
 * the process, allowing one job to wait on a job that's part of the same submission
 * but appears earlier in the drm_panthor_group_submit::queue_submits array.
 */
struct panthor_sync_signal {
	/** @node: list_head to track signal ops within a submit operation */
	struct list_head node;

	/** @handle: The syncobj handle. */
	u32 handle;

	/**
	 * @point: The syncobj point.
	 *
	 * Zero for regular syncobjs, and non-zero for timeline syncobjs.
	 */
	u64 point;

	/**
	 * @syncobj: The sync object pointed to by @handle.
	 */
	struct drm_syncobj *syncobj;

	/**
	 * @chain: Chain object used to link the new fence to an existing
	 * timeline syncobj.
	 *
	 * NULL for regular syncobjs, non-NULL for timeline syncobjs.
	 */
	struct dma_fence_chain *chain;

	/**
	 * @fence: The fence to assign to the syncobj or syncobj-point.
	 */
	struct dma_fence *fence;
};
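
/*
 * Illustrative scenario for the intra-batch dependency tracking described
 * above (handle/point values are hypothetical): a single GROUP_SUBMIT
 * carries two queue submits, where job B waits on the point job A signals:
 *
 *	queue_submits[0]: SIGNAL syncobj=5, point=1   (job A)
 *	queue_submits[1]: WAIT   syncobj=5, point=1   (job B)
 *
 * When job A's signal op is collected, a panthor_sync_signal entry is added
 * for (handle=5, point=1) and later updated to job A's finished fence. When
 * job B's wait op is processed, that entry is found in the signals list, so
 * job B depends on job A's fence instead of whatever the syncobj held before
 * the batch was submitted.
 */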

/**
 * struct panthor_job_ctx - Job context
 */
struct panthor_job_ctx {
	/** @job: The job that is about to be submitted to drm_sched. */
	struct drm_sched_job *job;

	/** @syncops: Array of sync operations. */
	struct drm_panthor_sync_op *syncops;

	/** @syncop_count: Number of sync operations. */
	u32 syncop_count;
};

/**
 * struct panthor_submit_ctx - Submission context
 *
 * Anything that's related to a submission (%DRM_IOCTL_PANTHOR_VM_BIND or
 * %DRM_IOCTL_PANTHOR_GROUP_SUBMIT) is kept here, so we can automate the
 * initialization and cleanup steps.
 */
struct panthor_submit_ctx {
	/** @file: DRM file this submission happens on. */
	struct drm_file *file;

	/**
	 * @signals: List of struct panthor_sync_signal.
	 *
	 * %DRM_PANTHOR_SYNC_OP_SIGNAL operations will be recorded here,
	 * and %DRM_PANTHOR_SYNC_OP_WAIT will first check if an entry
	 * matching the syncobj+point exists before calling
	 * drm_syncobj_find_fence(). This allows us to describe dependencies
	 * existing between jobs that are part of the same batch.
	 */
	struct list_head signals;

	/** @jobs: Array of jobs. */
	struct panthor_job_ctx *jobs;

	/** @job_count: Number of entries in the @jobs array. */
	u32 job_count;

	/** @exec: drm_exec context used to acquire and prepare resv objects. */
	struct drm_exec exec;
};

#define PANTHOR_SYNC_OP_FLAGS_MASK \
	(DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK | DRM_PANTHOR_SYNC_OP_SIGNAL)

static bool sync_op_is_signal(const struct drm_panthor_sync_op *sync_op)
{
	return !!(sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL);
}

static bool sync_op_is_wait(const struct drm_panthor_sync_op *sync_op)
{
	/* Note that DRM_PANTHOR_SYNC_OP_WAIT == 0 */
	return !(sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL);
}

/**
 * panthor_check_sync_op() - Check drm_panthor_sync_op fields
 * @sync_op: The sync operation to check.
 *
 * Return: 0 on success, -EINVAL otherwise.
 */
static int
panthor_check_sync_op(const struct drm_panthor_sync_op *sync_op)
{
	u8 handle_type;

	if (sync_op->flags & ~PANTHOR_SYNC_OP_FLAGS_MASK)
		return -EINVAL;

	handle_type = sync_op->flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK;
	if (handle_type != DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ &&
	    handle_type != DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ)
		return -EINVAL;

	if (handle_type == DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ &&
	    sync_op->timeline_value != 0)
		return -EINVAL;

	return 0;
}
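
/*
 * Example of a sync operation that passes the checks above (illustrative,
 * with a hypothetical syncobj_handle): a signal on point 1 of a timeline
 * syncobj.
 *
 *	struct drm_panthor_sync_op op = {
 *		.flags = DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ |
 *			 DRM_PANTHOR_SYNC_OP_SIGNAL,
 *		.handle = syncobj_handle,
 *		.timeline_value = 1,
 *	};
 *
 * A binary (non-timeline) syncobj would use _HANDLE_TYPE_SYNCOBJ and must
 * leave timeline_value at zero, or the operation is rejected with -EINVAL.
 */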

/**
 * panthor_sync_signal_free() - Release resources and free a panthor_sync_signal object
 * @sig_sync: Signal object to free.
 */
static void
panthor_sync_signal_free(struct panthor_sync_signal *sig_sync)
{
	if (!sig_sync)
		return;

	drm_syncobj_put(sig_sync->syncobj);
	dma_fence_chain_free(sig_sync->chain);
	dma_fence_put(sig_sync->fence);
	kfree(sig_sync);
}

/**
 * panthor_submit_ctx_add_sync_signal() - Add a signal operation to a submit context
 * @ctx: Context to add the signal operation to.
 * @handle: Syncobj handle.
 * @point: Syncobj point.
 *
 * Return: 0 on success, otherwise negative error value.
 */
static int
panthor_submit_ctx_add_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
{
	struct panthor_sync_signal *sig_sync;
	struct dma_fence *cur_fence;
	int ret;

	sig_sync = kzalloc(sizeof(*sig_sync), GFP_KERNEL);
	if (!sig_sync)
		return -ENOMEM;

	sig_sync->handle = handle;
	sig_sync->point = point;

	if (point > 0) {
		sig_sync->chain = dma_fence_chain_alloc();
		if (!sig_sync->chain) {
			ret = -ENOMEM;
			goto err_free_sig_sync;
		}
	}

	sig_sync->syncobj = drm_syncobj_find(ctx->file, handle);
	if (!sig_sync->syncobj) {
		ret = -EINVAL;
		goto err_free_sig_sync;
	}

	/* Retrieve the current fence attached to that point. It's
	 * perfectly fine to get a NULL fence here, it just means there's
	 * no fence attached to that point yet.
	 */
	if (!drm_syncobj_find_fence(ctx->file, handle, point, 0, &cur_fence))
		sig_sync->fence = cur_fence;

	list_add_tail(&sig_sync->node, &ctx->signals);

	return 0;

err_free_sig_sync:
	panthor_sync_signal_free(sig_sync);
	return ret;
}

/**
 * panthor_submit_ctx_search_sync_signal() - Search an existing signal operation in a
 * submit context.
 * @ctx: Context to search the signal operation in.
 * @handle: Syncobj handle.
 * @point: Syncobj point.
 *
 * Return: A valid panthor_sync_signal object if found, NULL otherwise.
 */
static struct panthor_sync_signal *
panthor_submit_ctx_search_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
{
	struct panthor_sync_signal *sig_sync;

	list_for_each_entry(sig_sync, &ctx->signals, node) {
		if (handle == sig_sync->handle && point == sig_sync->point)
			return sig_sync;
	}

	return NULL;
}

/**
 * panthor_submit_ctx_add_job() - Add a job to a submit context
 * @ctx: Context to add the job to.
 * @idx: Index of the job in the context.
 * @job: Job to add.
 * @syncs: Sync operations provided by userspace.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_add_job(struct panthor_submit_ctx *ctx, u32 idx,
			   struct drm_sched_job *job,
			   const struct drm_panthor_obj_array *syncs)
{
	int ret;

	ctx->jobs[idx].job = job;

	ret = PANTHOR_UOBJ_GET_ARRAY(ctx->jobs[idx].syncops, syncs);
	if (ret)
		return ret;

	ctx->jobs[idx].syncop_count = syncs->count;
	return 0;
}

/**
 * panthor_submit_ctx_get_sync_signal() - Search signal operation and add one if none was found.
 * @ctx: Context to search the signal operation in.
 * @handle: Syncobj handle.
 * @point: Syncobj point.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_get_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
{
	struct panthor_sync_signal *sig_sync;

	sig_sync = panthor_submit_ctx_search_sync_signal(ctx, handle, point);
	if (sig_sync)
		return 0;

	return panthor_submit_ctx_add_sync_signal(ctx, handle, point);
}

/**
 * panthor_submit_ctx_update_job_sync_signal_fences() - Update fences
 * on the signal operations specified by a job.
 * @ctx: Context to search the signal operation in.
 * @job_idx: Index of the job to operate on.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_update_job_sync_signal_fences(struct panthor_submit_ctx *ctx,
						 u32 job_idx)
{
	struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
						    struct panthor_device,
						    base);
	struct dma_fence *done_fence = &ctx->jobs[job_idx].job->s_fence->finished;
	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;

	for (u32 i = 0; i < sync_op_count; i++) {
		struct dma_fence *old_fence;
		struct panthor_sync_signal *sig_sync;

		if (!sync_op_is_signal(&sync_ops[i]))
			continue;

		sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
								 sync_ops[i].timeline_value);
		if (drm_WARN_ON(&ptdev->base, !sig_sync))
			return -EINVAL;

		old_fence = sig_sync->fence;
		sig_sync->fence = dma_fence_get(done_fence);
		dma_fence_put(old_fence);

		if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
			return -EINVAL;
	}

	return 0;
}

/**
 * panthor_submit_ctx_collect_job_signal_ops() - Iterate over all job signal operations
 * and add them to the context.
 * @ctx: Context to search the signal operation in.
 * @job_idx: Index of the job to operate on.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_collect_job_signal_ops(struct panthor_submit_ctx *ctx,
					  u32 job_idx)
{
	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;

	for (u32 i = 0; i < sync_op_count; i++) {
		int ret;

		if (!sync_op_is_signal(&sync_ops[i]))
			continue;

		ret = panthor_check_sync_op(&sync_ops[i]);
		if (ret)
			return ret;

		ret = panthor_submit_ctx_get_sync_signal(ctx,
							 sync_ops[i].handle,
							 sync_ops[i].timeline_value);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * panthor_submit_ctx_push_fences() - Iterate over the signal array, and for each entry, push
 * the currently assigned fence to the associated syncobj.
 * @ctx: Context to push fences on.
 *
 * This is the last step of a submission procedure, and is done once we know the submission
 * is effective and job fences are guaranteed to be signaled in finite time.
 */
static void
panthor_submit_ctx_push_fences(struct panthor_submit_ctx *ctx)
{
	struct panthor_sync_signal *sig_sync;

	list_for_each_entry(sig_sync, &ctx->signals, node) {
		if (sig_sync->chain) {
			drm_syncobj_add_point(sig_sync->syncobj, sig_sync->chain,
					      sig_sync->fence, sig_sync->point);
			sig_sync->chain = NULL;
		} else {
			drm_syncobj_replace_fence(sig_sync->syncobj, sig_sync->fence);
		}
	}
}

/**
 * panthor_submit_ctx_add_sync_deps_to_job() - Add sync wait operations as
 * job dependencies.
 * @ctx: Submit context.
 * @job_idx: Index of the job to operate on.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_add_sync_deps_to_job(struct panthor_submit_ctx *ctx,
					u32 job_idx)
{
	struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
						    struct panthor_device,
						    base);
	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
	struct drm_sched_job *job = ctx->jobs[job_idx].job;
	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
	int ret = 0;

	for (u32 i = 0; i < sync_op_count; i++) {
		struct panthor_sync_signal *sig_sync;
		struct dma_fence *fence;

		if (!sync_op_is_wait(&sync_ops[i]))
			continue;

		ret = panthor_check_sync_op(&sync_ops[i]);
		if (ret)
			return ret;

		sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
								 sync_ops[i].timeline_value);
		if (sig_sync) {
			if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
				return -EINVAL;

			fence = dma_fence_get(sig_sync->fence);
		} else {
			ret = drm_syncobj_find_fence(ctx->file, sync_ops[i].handle,
						     sync_ops[i].timeline_value,
						     0, &fence);
			if (ret)
				return ret;
		}

		ret = drm_sched_job_add_dependency(job, fence);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * panthor_submit_ctx_collect_jobs_signal_ops() - Collect all signal operations
 * and add them to the submit context.
 * @ctx: Submit context.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_collect_jobs_signal_ops(struct panthor_submit_ctx *ctx)
{
	for (u32 i = 0; i < ctx->job_count; i++) {
		int ret;

		ret = panthor_submit_ctx_collect_job_signal_ops(ctx, i);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * panthor_submit_ctx_add_deps_and_arm_jobs() - Add job dependencies and arm jobs
 * @ctx: Submit context.
 *
 * Must be called after the resv preparation has been taken care of.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_add_deps_and_arm_jobs(struct panthor_submit_ctx *ctx)
{
	for (u32 i = 0; i < ctx->job_count; i++) {
		int ret;

		ret = panthor_submit_ctx_add_sync_deps_to_job(ctx, i);
		if (ret)
			return ret;

		drm_sched_job_arm(ctx->jobs[i].job);

		ret = panthor_submit_ctx_update_job_sync_signal_fences(ctx, i);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * panthor_submit_ctx_push_jobs() - Push jobs to their scheduling entities.
 * @ctx: Submit context.
 * @upd_resvs: Callback used to update reservation objects that were previously
 * prepared.
 */
static void
panthor_submit_ctx_push_jobs(struct panthor_submit_ctx *ctx,
			     void (*upd_resvs)(struct drm_exec *, struct drm_sched_job *))
{
	for (u32 i = 0; i < ctx->job_count; i++) {
		upd_resvs(&ctx->exec, ctx->jobs[i].job);
		drm_sched_entity_push_job(ctx->jobs[i].job);

		/* Job is owned by the scheduler now. */
		ctx->jobs[i].job = NULL;
	}

	panthor_submit_ctx_push_fences(ctx);
}

/**
 * panthor_submit_ctx_init() - Initializes a submission context
 * @ctx: Submit context to initialize.
 * @file: drm_file this submission happens on.
 * @job_count: Number of jobs that will be submitted.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_submit_ctx_init(struct panthor_submit_ctx *ctx,
				   struct drm_file *file, u32 job_count)
{
	ctx->jobs = kvmalloc_array(job_count, sizeof(*ctx->jobs),
				   GFP_KERNEL | __GFP_ZERO);
	if (!ctx->jobs)
		return -ENOMEM;

	ctx->file = file;
	ctx->job_count = job_count;
	INIT_LIST_HEAD(&ctx->signals);
	drm_exec_init(&ctx->exec,
		      DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES,
		      0);
	return 0;
}

/**
 * panthor_submit_ctx_cleanup() - Cleanup a submission context
 * @ctx: Submit context to cleanup.
 * @job_put: Job put callback.
 */
static void panthor_submit_ctx_cleanup(struct panthor_submit_ctx *ctx,
				       void (*job_put)(struct drm_sched_job *))
{
	struct panthor_sync_signal *sig_sync, *tmp;
	unsigned long i;

	drm_exec_fini(&ctx->exec);

	list_for_each_entry_safe(sig_sync, tmp, &ctx->signals, node)
		panthor_sync_signal_free(sig_sync);

	for (i = 0; i < ctx->job_count; i++) {
		job_put(ctx->jobs[i].job);
		kvfree(ctx->jobs[i].syncops);
	}

	kvfree(ctx->jobs);
}

static int panthor_query_timestamp_info(struct panthor_device *ptdev,
					struct drm_panthor_timestamp_info *arg)
{
	int ret;

	ret = panthor_device_resume_and_get(ptdev);
	if (ret)
		return ret;

#ifdef CONFIG_ARM_ARCH_TIMER
	arg->timestamp_frequency = arch_timer_get_cntfrq();
#else
	arg->timestamp_frequency = 0;
#endif
	arg->current_timestamp = gpu_read64_counter(ptdev, GPU_TIMESTAMP);
	arg->timestamp_offset = gpu_read64(ptdev, GPU_TIMESTAMP_OFFSET);

	pm_runtime_put(ptdev->base.dev);
	return 0;
}

static int group_priority_permit(struct drm_file *file,
				 u8 priority)
{
	/* Ensure that priority is valid */
	if (priority > PANTHOR_GROUP_PRIORITY_REALTIME)
		return -EINVAL;

	/* Medium priority and below are always allowed */
	if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM)
		return 0;

	/* Higher priorities require CAP_SYS_NICE or DRM_MASTER */
	if (capable(CAP_SYS_NICE) || drm_is_current_master(file))
		return 0;

	return -EACCES;
}

static void panthor_query_group_priorities_info(struct drm_file *file,
						struct drm_panthor_group_priorities_info *arg)
{
	int prio;

	memset(arg, 0, sizeof(*arg));
	for (prio = PANTHOR_GROUP_PRIORITY_REALTIME; prio >= 0; prio--) {
		if (!group_priority_permit(file, prio))
			arg->allowed_mask |= BIT(prio);
	}
}
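
/*
 * Userspace-side sketch of consuming the mask built above (illustrative):
 * each bit of allowed_mask corresponds to one PANTHOR_GROUP_PRIORITY_* level,
 * so a client can test a level before asking for it in GROUP_CREATE:
 *
 *	bool high_allowed =
 *		priorities_info.allowed_mask & (1u << PANTHOR_GROUP_PRIORITY_HIGH);
 *
 * An unprivileged client (no CAP_SYS_NICE, not DRM master) typically sees
 * only the LOW and MEDIUM bits set.
 */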

static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct drm_file *file)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct drm_panthor_dev_query *args = data;
	struct drm_panthor_timestamp_info timestamp_info;
	struct drm_panthor_group_priorities_info priorities_info;
	int ret;

	if (!args->pointer) {
		switch (args->type) {
		case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
			args->size = sizeof(ptdev->gpu_info);
			return 0;

		case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
			args->size = sizeof(ptdev->csif_info);
			return 0;

		case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO:
			args->size = sizeof(timestamp_info);
			return 0;

		case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO:
			args->size = sizeof(priorities_info);
			return 0;

		default:
			return -EINVAL;
		}
	}

	switch (args->type) {
	case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
		return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->gpu_info);

	case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
		return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->csif_info);

	case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO:
		ret = panthor_query_timestamp_info(ptdev, &timestamp_info);
		if (ret)
			return ret;

		return PANTHOR_UOBJ_SET(args->pointer, args->size, timestamp_info);

	case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO:
		panthor_query_group_priorities_info(file, &priorities_info);
		return PANTHOR_UOBJ_SET(args->pointer, args->size, priorities_info);

	default:
		return -EINVAL;
	}
}
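
/*
 * Two-pass query pattern from userspace (illustrative sketch): a first call
 * with pointer == 0 only returns the object size, the second call fills the
 * buffer. Size mismatches between uAPI versions are reconciled by
 * panthor_set_uobj().
 *
 *	struct drm_panthor_dev_query q = {
 *		.type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q);  // q.size is set
 *	q.pointer = (__u64)(uintptr_t)malloc(q.size);
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q);  // data copied out
 */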

#define PANTHOR_VM_CREATE_FLAGS 0

static int panthor_ioctl_vm_create(struct drm_device *ddev, void *data,
				   struct drm_file *file)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_create *args = data;
	int cookie, ret;

	if (!drm_dev_enter(ddev, &cookie))
		return -ENODEV;

	ret = panthor_vm_pool_create_vm(ptdev, pfile->vms, args);
	if (ret >= 0) {
		args->id = ret;
		ret = 0;
	}

	drm_dev_exit(cookie);
	return ret;
}

static int panthor_ioctl_vm_destroy(struct drm_device *ddev, void *data,
				    struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_destroy *args = data;

	if (args->pad)
		return -EINVAL;

	return panthor_vm_pool_destroy_vm(pfile->vms, args->id);
}

#define PANTHOR_BO_FLAGS DRM_PANTHOR_BO_NO_MMAP

static int panthor_ioctl_bo_create(struct drm_device *ddev, void *data,
				   struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_bo_create *args = data;
	struct panthor_vm *vm = NULL;
	int cookie, ret;

	if (!drm_dev_enter(ddev, &cookie))
		return -ENODEV;

	if (!args->size || args->pad ||
	    (args->flags & ~PANTHOR_BO_FLAGS)) {
		ret = -EINVAL;
		goto out_dev_exit;
	}

	if (args->exclusive_vm_id) {
		vm = panthor_vm_pool_get_vm(pfile->vms, args->exclusive_vm_id);
		if (!vm) {
			ret = -EINVAL;
			goto out_dev_exit;
		}
	}

	ret = panthor_gem_create_with_handle(file, ddev, vm, &args->size,
					     args->flags, &args->handle);

	panthor_vm_put(vm);

out_dev_exit:
	drm_dev_exit(cookie);
	return ret;
}

static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
					struct drm_file *file)
{
	struct drm_panthor_bo_mmap_offset *args = data;
	struct panthor_gem_object *bo;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	bo = to_panthor_bo(obj);
	if (bo->flags & DRM_PANTHOR_BO_NO_MMAP) {
		ret = -EPERM;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	args->offset = drm_vma_node_offset_addr(&obj->vma_node);

out:
	drm_gem_object_put(obj);
	return ret;
}

static int panthor_ioctl_group_submit(struct drm_device *ddev, void *data,
				      struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_group_submit *args = data;
	struct drm_panthor_queue_submit *jobs_args;
	struct panthor_submit_ctx ctx;
	int ret = 0, cookie;

	if (args->pad)
		return -EINVAL;

	if (!drm_dev_enter(ddev, &cookie))
		return -ENODEV;

	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->queue_submits);
	if (ret)
		goto out_dev_exit;

	ret = panthor_submit_ctx_init(&ctx, file, args->queue_submits.count);
	if (ret)
		goto out_free_jobs_args;

	/* Create jobs and attach sync operations */
	for (u32 i = 0; i < args->queue_submits.count; i++) {
		const struct drm_panthor_queue_submit *qsubmit = &jobs_args[i];
		struct drm_sched_job *job;

		job = panthor_job_create(pfile, args->group_handle, qsubmit,
					 file->client_id);
		if (IS_ERR(job)) {
			ret = PTR_ERR(job);
			goto out_cleanup_submit_ctx;
		}

		ret = panthor_submit_ctx_add_job(&ctx, i, job, &qsubmit->syncs);
		if (ret)
			goto out_cleanup_submit_ctx;
	}

	/*
	 * Collect signal operations on all jobs, so that each job can pick
	 * its dependencies from them and update the fence to signal when the
	 * job is submitted.
	 */
	ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
	if (ret)
		goto out_cleanup_submit_ctx;

	/*
	 * We acquire/prepare resvs on all jobs before proceeding with the
	 * dependency registration.
	 *
	 * This is solving two problems:
	 * 1. drm_sched_job_arm() and drm_sched_entity_push_job() must be
	 *    protected by a lock to make sure no concurrent access to the same
	 *    entity get interleaved, which would mess up the fence seqno
	 *    ordering. Luckily, one of the resv being acquired is the VM resv,
	 *    and a scheduling entity is only bound to a single VM. As soon as
	 *    we acquire the VM resv, we should be safe.
	 * 2. Jobs might depend on fences that were issued by previous jobs in
	 *    the same batch, so we can't add dependencies on all jobs before
	 *    arming previous jobs and registering the fence to the signal
	 *    array, otherwise we might miss dependencies, or point to an
	 *    outdated fence.
	 */
	if (args->queue_submits.count > 0) {
		/* All jobs target the same group, so they also point to the same VM. */
		struct panthor_vm *vm = panthor_job_vm(ctx.jobs[0].job);

		drm_exec_until_all_locked(&ctx.exec) {
			ret = panthor_vm_prepare_mapped_bos_resvs(&ctx.exec, vm,
								  args->queue_submits.count);
		}

		if (ret)
			goto out_cleanup_submit_ctx;
	}

	/*
	 * Now that resvs are locked/prepared, we can iterate over each job to
	 * add the dependencies, arm the job fence, register the job fence to
	 * the signal array.
	 */
	ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
	if (ret)
		goto out_cleanup_submit_ctx;

	/* Nothing can fail after that point, so we can make our job fences
	 * visible to the outside world. Push jobs and set the job fences to
	 * the resv slots we reserved. This also pushes the fences to the
	 * syncobjs that are part of the signal array.
	 */
	panthor_submit_ctx_push_jobs(&ctx, panthor_job_update_resvs);

out_cleanup_submit_ctx:
	panthor_submit_ctx_cleanup(&ctx, panthor_job_put);

out_free_jobs_args:
	kvfree(jobs_args);

out_dev_exit:
	drm_dev_exit(cookie);
	return ret;
}

static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data,
				       struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_group_destroy *args = data;

	if (args->pad)
		return -EINVAL;

	return panthor_group_destroy(pfile, args->group_handle);
}

static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
				      struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_group_create *args = data;
	struct drm_panthor_queue_create *queue_args;
	int ret;

	if (!args->queues.count || args->queues.count > MAX_CS_PER_CSG)
		return -EINVAL;

	ret = PANTHOR_UOBJ_GET_ARRAY(queue_args, &args->queues);
	if (ret)
		return ret;

	ret = group_priority_permit(file, args->priority);
	if (ret)
		goto out;

	ret = panthor_group_create(pfile, args, queue_args, file->client_id);
	if (ret < 0)
		goto out;
	args->group_handle = ret;
	ret = 0;

out:
	kvfree(queue_args);
	return ret;
}

static int panthor_ioctl_group_get_state(struct drm_device *ddev, void *data,
					 struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_group_get_state *args = data;

	return panthor_group_get_state(pfile, args);
}

static int panthor_ioctl_tiler_heap_create(struct drm_device *ddev, void *data,
					   struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_tiler_heap_create *args = data;
	struct panthor_heap_pool *pool;
	struct panthor_vm *vm;
	int ret;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
	if (!vm)
		return -EINVAL;

	pool = panthor_vm_get_heap_pool(vm, true);
	if (IS_ERR(pool)) {
		ret = PTR_ERR(pool);
		goto out_put_vm;
	}

	ret = panthor_heap_create(pool,
				  args->initial_chunk_count,
				  args->chunk_size,
				  args->max_chunks,
				  args->target_in_flight,
				  &args->tiler_heap_ctx_gpu_va,
				  &args->first_heap_chunk_gpu_va);
	if (ret < 0)
		goto out_put_heap_pool;

	/* Heap pools are per-VM. We combine the VM and HEAP id to make
	 * a unique heap handle.
	 */
	args->handle = (args->vm_id << 16) | ret;
	ret = 0;

out_put_heap_pool:
	panthor_heap_pool_put(pool);

out_put_vm:
	panthor_vm_put(vm);
	return ret;
}
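
/*
 * Handle layout used above (and decoded again in tiler_heap_destroy below):
 * the VM id lives in the upper 16 bits, the per-pool heap id in the lower
 * 16 bits. Illustrative decode:
 *
 *	u32 vm_id   = handle >> 16;
 *	u32 heap_id = handle & GENMASK(15, 0);
 */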

static int panthor_ioctl_tiler_heap_destroy(struct drm_device *ddev, void *data,
					    struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_tiler_heap_destroy *args = data;
	struct panthor_heap_pool *pool;
	struct panthor_vm *vm;
	int ret;

	if (args->pad)
		return -EINVAL;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->handle >> 16);
	if (!vm)
		return -EINVAL;

	pool = panthor_vm_get_heap_pool(vm, false);
	if (IS_ERR(pool)) {
		ret = PTR_ERR(pool);
		goto out_put_vm;
	}

	ret = panthor_heap_destroy(pool, args->handle & GENMASK(15, 0));
	panthor_heap_pool_put(pool);

out_put_vm:
	panthor_vm_put(vm);
	return ret;
}

static int panthor_ioctl_vm_bind_async(struct drm_device *ddev,
				       struct drm_panthor_vm_bind *args,
				       struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_bind_op *jobs_args;
	struct panthor_submit_ctx ctx;
	struct panthor_vm *vm;
	int ret = 0;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
	if (!vm)
		return -EINVAL;

	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
	if (ret)
		goto out_put_vm;

	ret = panthor_submit_ctx_init(&ctx, file, args->ops.count);
	if (ret)
		goto out_free_jobs_args;

	for (u32 i = 0; i < args->ops.count; i++) {
		struct drm_panthor_vm_bind_op *op = &jobs_args[i];
		struct drm_sched_job *job;

		job = panthor_vm_bind_job_create(file, vm, op);
		if (IS_ERR(job)) {
			ret = PTR_ERR(job);
			goto out_cleanup_submit_ctx;
		}

		ret = panthor_submit_ctx_add_job(&ctx, i, job, &op->syncs);
		if (ret)
			goto out_cleanup_submit_ctx;
	}

	ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
	if (ret)
		goto out_cleanup_submit_ctx;

	/* Prepare reservation objects for each VM_BIND job. */
	drm_exec_until_all_locked(&ctx.exec) {
		for (u32 i = 0; i < ctx.job_count; i++) {
			ret = panthor_vm_bind_job_prepare_resvs(&ctx.exec, ctx.jobs[i].job);
			drm_exec_retry_on_contention(&ctx.exec);
			if (ret)
				goto out_cleanup_submit_ctx;
		}
	}

	ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
	if (ret)
		goto out_cleanup_submit_ctx;

	/* Nothing can fail after that point. */
	panthor_submit_ctx_push_jobs(&ctx, panthor_vm_bind_job_update_resvs);

out_cleanup_submit_ctx:
	panthor_submit_ctx_cleanup(&ctx, panthor_vm_bind_job_put);

out_free_jobs_args:
	kvfree(jobs_args);

out_put_vm:
	panthor_vm_put(vm);
	return ret;
}

static int panthor_ioctl_vm_bind_sync(struct drm_device *ddev,
				      struct drm_panthor_vm_bind *args,
				      struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_bind_op *jobs_args;
	struct panthor_vm *vm;
	int ret;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
	if (!vm)
		return -EINVAL;

	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
	if (ret)
		goto out_put_vm;

	for (u32 i = 0; i < args->ops.count; i++) {
		ret = panthor_vm_bind_exec_sync_op(file, vm, &jobs_args[i]);
		if (ret) {
			/* Update ops.count so the user knows where things failed. */
			args->ops.count = i;
			break;
		}
	}

	kvfree(jobs_args);

out_put_vm:
	panthor_vm_put(vm);
	return ret;
}

#define PANTHOR_VM_BIND_FLAGS DRM_PANTHOR_VM_BIND_ASYNC

static int panthor_ioctl_vm_bind(struct drm_device *ddev, void *data,
				 struct drm_file *file)
{
	struct drm_panthor_vm_bind *args = data;
	int cookie, ret;

	if (!drm_dev_enter(ddev, &cookie))
		return -ENODEV;

	if (args->flags & DRM_PANTHOR_VM_BIND_ASYNC)
		ret = panthor_ioctl_vm_bind_async(ddev, args, file);
	else
		ret = panthor_ioctl_vm_bind_sync(ddev, args, file);

	drm_dev_exit(cookie);
	return ret;
}

static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data,
				      struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_get_state *args = data;
	struct panthor_vm *vm;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
	if (!vm)
		return -EINVAL;

	if (panthor_vm_is_unusable(vm))
		args->state = DRM_PANTHOR_VM_STATE_UNUSABLE;
	else
		args->state = DRM_PANTHOR_VM_STATE_USABLE;

	panthor_vm_put(vm);
	return 0;
}

static int panthor_ioctl_bo_set_label(struct drm_device *ddev, void *data,
				      struct drm_file *file)
{
	struct drm_panthor_bo_set_label *args = data;
	struct drm_gem_object *obj;
	const char *label = NULL;
	int ret = 0;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->label) {
		label = strndup_user((const char __user *)(uintptr_t)args->label,
				     PANTHOR_BO_LABEL_MAXLEN);
		if (IS_ERR(label)) {
			ret = PTR_ERR(label);
			if (ret == -EINVAL)
				ret = -E2BIG;
			goto err_put_obj;
		}
	}

	/*
	 * We treat passing a label of length 0 and passing a NULL label
	 * differently, because even though they might seem conceptually
	 * similar, future uses of the BO label might expect a different
	 * behaviour in each case.
	 */
	panthor_gem_bo_set_label(obj, label);

err_put_obj:
	drm_gem_object_put(obj);

	return ret;
}

static int panthor_ioctl_set_user_mmio_offset(struct drm_device *ddev,
					      void *data, struct drm_file *file)
{
	struct drm_panthor_set_user_mmio_offset *args = data;
	struct panthor_file *pfile = file->driver_priv;

	if (args->offset != DRM_PANTHOR_USER_MMIO_OFFSET_32BIT &&
	    args->offset != DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
		return -EINVAL;

	WRITE_ONCE(pfile->user_mmio.offset, args->offset);
	return 0;
}
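
/*
 * Illustrative userspace call for the ioctl above (assuming a libdrm-style
 * drmIoctl() wrapper): a 32-bit client opts into the 32-bit MMIO offset so
 * the value fits in mmap2's page offset.
 *
 *	struct drm_panthor_set_user_mmio_offset set = {
 *		.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET, &set);
 */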

static int
panthor_open(struct drm_device *ddev, struct drm_file *file)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct panthor_file *pfile;
	int ret;

	pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
	if (!pfile)
		return -ENOMEM;

	pfile->ptdev = ptdev;
	pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET;

#ifdef CONFIG_ARM64
	/*
	 * With 32-bit systems being limited by the 32-bit representation of
	 * mmap2's pgoffset field, we need to make the MMIO offset arch
	 * specific.
	 */
	if (test_tsk_thread_flag(current, TIF_32BIT))
		pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
#endif

	ret = panthor_vm_pool_create(pfile);
	if (ret)
		goto err_free_file;

	ret = panthor_group_pool_create(pfile);
	if (ret)
		goto err_destroy_vm_pool;

	file->driver_priv = pfile;
	return 0;

err_destroy_vm_pool:
	panthor_vm_pool_destroy(pfile);

err_free_file:
	kfree(pfile);
	return ret;
}

static void
panthor_postclose(struct drm_device *ddev, struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;

	panthor_group_pool_destroy(pfile);
	panthor_vm_pool_destroy(pfile);

	kfree(pfile);
}

static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
#define PANTHOR_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(PANTHOR_##n, panthor_ioctl_##func, flags)

	PANTHOR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(VM_CREATE, vm_create, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(VM_DESTROY, vm_destroy, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(VM_BIND, vm_bind, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(VM_GET_STATE, vm_get_state, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(BO_CREATE, bo_create, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(BO_MMAP_OFFSET, bo_mmap_offset, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(GROUP_CREATE, group_create, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(GROUP_DESTROY, group_destroy, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(GROUP_GET_STATE, group_get_state, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(TILER_HEAP_CREATE, tiler_heap_create, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(BO_SET_LABEL, bo_set_label, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(SET_USER_MMIO_OFFSET, set_user_mmio_offset, DRM_RENDER_ALLOW),
};

static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file = filp->private_data;
	struct panthor_file *pfile = file->driver_priv;
	struct panthor_device *ptdev = pfile->ptdev;
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
	u64 user_mmio_offset;
	int ret, cookie;

	if (!drm_dev_enter(file->minor->dev, &cookie))
		return -ENODEV;

	/* Adjust the user MMIO offset to match the offset used kernel side.
	 * We use a local variable with a READ_ONCE() here to make sure
	 * the user_mmio_offset we use for the is_user_mmio_mapping() check
	 * hasn't changed when we do the offset adjustment.
	 */
	user_mmio_offset = READ_ONCE(pfile->user_mmio.offset);
	if (offset >= user_mmio_offset) {
		offset -= user_mmio_offset;
		offset += DRM_PANTHOR_USER_MMIO_OFFSET;
		vma->vm_pgoff = offset >> PAGE_SHIFT;
		ret = panthor_device_mmap_io(ptdev, vma);
	} else {
		ret = drm_gem_mmap(filp, vma);
	}

	drm_dev_exit(cookie);
	return ret;
}
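
/*
 * Worked example of the offset adjustment above (values illustrative): a
 * client that selected DRM_PANTHOR_USER_MMIO_OFFSET_32BIT mmaps relative to
 * that base; the handler rebases it onto the canonical kernel-side offset:
 *
 *	offset  = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT + page_offset;
 *	offset -= DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;  // user-visible base
 *	offset += DRM_PANTHOR_USER_MMIO_OFFSET;        // kernel-side base
 *
 * so panthor_device_mmap_io() only ever deals with one MMIO offset layout.
 */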

static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
				    struct panthor_file *pfile,
				    struct drm_printer *p)
{
	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_ALL)
		panthor_fdinfo_gather_group_samples(pfile);

	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) {
#ifdef CONFIG_ARM_ARCH_TIMER
		drm_printf(p, "drm-engine-panthor:\t%llu ns\n",
			   DIV_ROUND_UP_ULL((pfile->stats.time * NSEC_PER_SEC),
					    arch_timer_get_cntfrq()));
#endif
	}
	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_CYCLES)
		drm_printf(p, "drm-cycles-panthor:\t%llu\n", pfile->stats.cycles);

	drm_printf(p, "drm-maxfreq-panthor:\t%lu Hz\n", ptdev->fast_rate);
	drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n",
		   panthor_devfreq_get_freq(ptdev));
}

static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file)
{
	const char *drv_name = file->minor->dev->driver->name;
	struct panthor_file *pfile = file->driver_priv;
	struct drm_memory_stats stats = {0};

	panthor_fdinfo_gather_group_mem_info(pfile, &stats);
	panthor_vm_heaps_sizes(pfile, &stats);

	drm_fdinfo_print_size(p, drv_name, "resident", "memory", stats.resident);
	drm_fdinfo_print_size(p, drv_name, "active", "memory", stats.active);
}

static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	struct drm_device *dev = file->minor->dev;
	struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);

	panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
	panthor_show_internal_memory_stats(p, file);

	drm_show_memory_stats(p, file);
}

static const struct file_operations panthor_drm_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = noop_llseek,
	.mmap = panthor_mmap,
	.show_fdinfo = drm_show_fdinfo,
	.fop_flags = FOP_UNSIGNED_OFFSET,
};

#ifdef CONFIG_DEBUG_FS
static int panthor_gems_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);

	panthor_gem_debugfs_print_bos(ptdev, m);

	return 0;
}

static struct drm_info_list panthor_debugfs_list[] = {
	{"gems", panthor_gems_show, 0, NULL},
};

static int panthor_gems_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(panthor_debugfs_list,
				 ARRAY_SIZE(panthor_debugfs_list),
				 minor->debugfs_root, minor);

	return 0;
}

static void panthor_debugfs_init(struct drm_minor *minor)
{
	panthor_mmu_debugfs_init(minor);
	panthor_gems_debugfs_init(minor);
}
#endif

/*
 * PanCSF driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds DEV_QUERY_TIMESTAMP_INFO query
 * - 1.2 - adds DEV_QUERY_GROUP_PRIORITIES_INFO query
 *       - adds PANTHOR_GROUP_PRIORITY_REALTIME priority
 * - 1.3 - adds DRM_PANTHOR_GROUP_STATE_INNOCENT flag
 * - 1.4 - adds DRM_IOCTL_PANTHOR_BO_SET_LABEL ioctl
 * - 1.5 - adds DRM_PANTHOR_SET_USER_MMIO_OFFSET ioctl
 */
static const struct drm_driver panthor_drm_driver = {
	.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
			   DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
	.open = panthor_open,
	.postclose = panthor_postclose,
	.show_fdinfo = panthor_show_fdinfo,
	.ioctls = panthor_drm_driver_ioctls,
	.num_ioctls = ARRAY_SIZE(panthor_drm_driver_ioctls),
	.fops = &panthor_drm_driver_fops,
	.name = "panthor",
	.desc = "Panthor DRM driver",
	.major = 1,
	.minor = 5,

	.gem_create_object = panthor_gem_create_object,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = panthor_debugfs_init,
#endif
};

static int panthor_probe(struct platform_device *pdev)
{
	struct panthor_device *ptdev;

	ptdev = devm_drm_dev_alloc(&pdev->dev, &panthor_drm_driver,
				   struct panthor_device, base);
	if (IS_ERR(ptdev))
		return -ENOMEM;

	platform_set_drvdata(pdev, ptdev);

	return panthor_device_init(ptdev);
}

static void panthor_remove(struct platform_device *pdev)
{
	struct panthor_device *ptdev = platform_get_drvdata(pdev);

	panthor_device_unplug(ptdev);
}

static ssize_t profiling_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", ptdev->profile_mask);
}

static ssize_t profiling_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	u32 value;
	int err;

	err = kstrtou32(buf, 0, &value);
	if (err)
		return err;

	if ((value & ~PANTHOR_DEVICE_PROFILING_ALL) != 0)
		return -EINVAL;

	ptdev->profile_mask = value;

	return len;
}

static DEVICE_ATTR_RW(profiling);

static struct attribute *panthor_attrs[] = {
	&dev_attr_profiling.attr,
	NULL,
};

ATTRIBUTE_GROUPS(panthor);

static const struct panthor_soc_data soc_data_mediatek_mt8196 = {
	.asn_hash_enable = true,
	.asn_hash = { 0xb, 0xe, 0x0, },
};

static const struct of_device_id dt_match[] = {
	{ .compatible = "mediatek,mt8196-mali", .data = &soc_data_mediatek_mt8196, },
	{ .compatible = "rockchip,rk3588-mali" },
	{ .compatible = "arm,mali-valhall-csf" },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static DEFINE_RUNTIME_DEV_PM_OPS(panthor_pm_ops,
				 panthor_device_suspend,
				 panthor_device_resume,
				 NULL);

static struct platform_driver panthor_driver = {
	.probe = panthor_probe,
	.remove = panthor_remove,
	.driver = {
		.name = "panthor",
		.pm = pm_ptr(&panthor_pm_ops),
		.of_match_table = dt_match,
		.dev_groups = panthor_groups,
	},
};

/*
 * Workqueue used to cleanup stuff.
 *
 * We create a dedicated workqueue so we can drain on unplug and
 * make sure all resources are freed before the module is unloaded.
 */
struct workqueue_struct *panthor_cleanup_wq;

static int __init panthor_init(void)
{
	int ret;

	ret = panthor_mmu_pt_cache_init();
	if (ret)
		return ret;

	panthor_cleanup_wq = alloc_workqueue("panthor-cleanup", WQ_UNBOUND, 0);
	if (!panthor_cleanup_wq) {
		pr_err("panthor: Failed to allocate the workqueues\n");
		ret = -ENOMEM;
		goto err_mmu_pt_cache_fini;
	}

	ret = platform_driver_register(&panthor_driver);
	if (ret)
		goto err_destroy_cleanup_wq;

	return 0;

err_destroy_cleanup_wq:
	destroy_workqueue(panthor_cleanup_wq);

err_mmu_pt_cache_fini:
	panthor_mmu_pt_cache_fini();
	return ret;
}
module_init(panthor_init);

static void __exit panthor_exit(void)
{
	platform_driver_unregister(&panthor_driver);
	destroy_workqueue(panthor_cleanup_wq);
	panthor_mmu_pt_cache_fini();
}
module_exit(panthor_exit);

MODULE_AUTHOR("Panthor Project Developers");
MODULE_DESCRIPTION("Panthor DRM Driver");
MODULE_LICENSE("Dual MIT/GPL");