/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _XE_GPU_SCHEDULER_H_
#define _XE_GPU_SCHEDULER_H_

#include "xe_gpu_scheduler_types.h"
#include "xe_sched_job.h"

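/*
 * Scheduler lifetime. xe_sched_init() is expected to set up the wrapped DRM
 * scheduler (sched->base) with the given backend ops, submission/timeout
 * workqueues and TDR timeout; xe_sched_fini() tears it down again.
 */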
int xe_sched_init(struct xe_gpu_scheduler *sched,
		  const struct drm_sched_backend_ops *ops,
		  const struct xe_sched_backend_ops *xe_ops,
		  struct workqueue_struct *submit_wq,
		  uint32_t hw_submission, unsigned hang_limit,
		  long timeout, struct workqueue_struct *timeout_wq,
		  atomic_t *score, const char *name,
		  struct device *dev);
void xe_sched_fini(struct xe_gpu_scheduler *sched);

void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);

void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);

void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg);
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
			     struct xe_sched_msg *msg);
void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
			   struct xe_sched_msg *msg);

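/**
 * xe_sched_msg_lock() - Lock the scheduler message list
 * @sched: Xe GPU scheduler
 *
 * Messages reuse the base scheduler's job_list_lock rather than a
 * separate lock.
 */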
static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
{
	spin_lock(&sched->base.job_list_lock);
}

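/**
 * xe_sched_msg_unlock() - Unlock the scheduler message list
 * @sched: Xe GPU scheduler
 */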
static inline void xe_sched_msg_unlock(struct xe_gpu_scheduler *sched)
{
	spin_unlock(&sched->base.job_list_lock);
}

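/**
 * xe_sched_stop() - Stop the underlying DRM scheduler
 * @sched: Xe GPU scheduler
 *
 * Thin wrapper around drm_sched_stop() with no bad job to skip.
 */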
static inline void xe_sched_stop(struct xe_gpu_scheduler *sched)
{
	drm_sched_stop(&sched->base, NULL);
}

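/**
 * xe_sched_tdr_queue_imm() - Queue the timeout handler (TDR) immediately
 * @sched: Xe GPU scheduler
 */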
static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
{
	drm_sched_tdr_queue_imm(&sched->base);
}

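/**
 * xe_sched_resubmit_jobs() - Re-run unfinished jobs on the pending list
 * @sched: Xe GPU scheduler
 *
 * Walks the pending list and calls run_job() again for every job whose
 * hardware fence has not signaled. Once a job marked restore_replay is
 * seen, it and all subsequent jobs are resubmitted unconditionally.
 */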
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	bool restore_replay = false;

	list_for_each_entry(s_job, &sched->base.pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *hw_fence = s_fence->parent;

		restore_replay |= to_xe_sched_job(s_job)->restore_replay;
		if (restore_replay || (hw_fence && !dma_fence_is_signaled(hw_fence)))
			sched->base.ops->run_job(s_job);
	}
}

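/**
 * xe_sched_invalidate_job() - Check if a job has been run too many times
 * @job: Xe scheduler job
 * @threshold: maximum number of times the job may be run
 *
 * Thin wrapper around drm_sched_invalidate_job(), which returns true once
 * the job has been run more than @threshold times (tracked via the DRM
 * scheduler's per-job karma counter).
 */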
static inline bool
xe_sched_invalidate_job(struct xe_sched_job *job, int threshold)
{
	return drm_sched_invalidate_job(&job->drm, threshold);
}

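/**
 * xe_sched_add_pending_job() - Add a job to the head of the pending list
 * @sched: Xe GPU scheduler
 * @job: Xe scheduler job
 *
 * Takes job_list_lock internally.
 */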
static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
					    struct xe_sched_job *job)
{
	spin_lock(&sched->base.job_list_lock);
	list_add(&job->drm.list, &sched->base.pending_list);
	spin_unlock(&sched->base.job_list_lock);
}

/**
 * xe_sched_first_pending_job() - Find the first unsignaled pending job
 * @sched: Xe GPU scheduler
 *
 * Return: first job in the pending list whose hardware fence has not
 * signaled, or NULL if there is none.
 */
static inline
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_job *job, *r_job = NULL;

	spin_lock(&sched->base.job_list_lock);
	list_for_each_entry(job, &sched->base.pending_list, drm.list) {
		struct drm_sched_fence *s_fence = job->drm.s_fence;
		struct dma_fence *hw_fence = s_fence->parent;

		if (hw_fence && !dma_fence_is_signaled(hw_fence)) {
			r_job = job;
			break;
		}
	}
	spin_unlock(&sched->base.job_list_lock);

	return r_job;
}

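/**
 * xe_sched_entity_init() - Initialize an entity attached to one scheduler
 * @entity: Xe scheduler entity
 * @sched: Xe GPU scheduler
 *
 * Wraps drm_sched_entity_init() with priority 0, a one-element scheduler
 * list and no guilty pointer.
 */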
static inline int
xe_sched_entity_init(struct xe_sched_entity *entity,
		     struct xe_gpu_scheduler *sched)
{
	return drm_sched_entity_init(entity, 0,
				     (struct drm_gpu_scheduler **)&sched,
				     1, NULL);
}

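/* Entity teardown needs no Xe-specific handling; use the DRM helper directly. */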
#define xe_sched_entity_fini drm_sched_entity_fini

#endif
