/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__

#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"
#include "i915_sw_fence.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
#include "intel_wakeref.h"

#include "uc/intel_guc_fwif.h"

#define CONTEXT_REDZONE POISON_INUSE

DECLARE_EWMA(runtime, 3, 8);
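/*
 * DECLARE_EWMA(runtime, 3, 8) above generates struct ewma_runtime plus the
 * ewma_runtime_init(), ewma_runtime_add() and ewma_runtime_read() helpers
 * from <linux/average.h> (3 bits of fractional precision, each new sample
 * weighted by 1/8). It backs the stats.runtime.avg member declared below.
 * Illustrative sketch only, not a call sequence taken from this driver:
 *
 *	ewma_runtime_init(&ce->stats.runtime.avg);
 *	ewma_runtime_add(&ce->stats.runtime.avg, dt);
 *	avg = ewma_runtime_read(&ce->stats.runtime.avg);
 */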

struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;

struct intel_context_ops {
	unsigned long flags;
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)

#define COPS_RUNTIME_CYCLES_BIT 1
#define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT)

	int (*alloc)(struct intel_context *ce);

	void (*revoke)(struct intel_context *ce, struct i915_request *rq,
		       unsigned int preempt_timeout_ms);

	void (*close)(struct intel_context *ce);

	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
	int (*pin)(struct intel_context *ce, void *vaddr);
	void (*unpin)(struct intel_context *ce);
	void (*post_unpin)(struct intel_context *ce);

	void (*cancel_request)(struct intel_context *ce,
			       struct i915_request *rq);

	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	void (*sched_disable)(struct intel_context *ce);

	void (*update_stats)(struct intel_context *ce);

	void (*reset)(struct intel_context *ce);
	void (*destroy)(struct kref *kref);

	/* virtual/parallel engine/context interface */
	struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
						unsigned int count,
						unsigned long flags);
	struct intel_context *(*create_parallel)(struct intel_engine_cs **engines,
						 unsigned int num_siblings,
						 unsigned int width);
	struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
					       unsigned int sibling);
};
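
/*
 * The ops above form the submission backend vtable; execlists, legacy ring
 * and GuC submission each provide their own implementation. A rough,
 * illustrative ordering of the pinning-related hooks as driven by the core
 * context code (sketch only, not the authoritative call sequence):
 *
 *	err = ce->ops->pre_pin(ce, ww, &vaddr);	// prepare/map state
 *	if (!err)
 *		err = ce->ops->pin(ce, vaddr);	// make the context usable by HW
 *	...
 *	ce->ops->enter(ce);			// context becomes active
 *	ce->ops->exit(ce);			// context goes idle again
 *	...
 *	ce->ops->unpin(ce);
 *	ce->ops->post_unpin(ce);		// release pre_pin resources
 */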

struct intel_context {
	/*
	 * Note: Some fields may be accessed under RCU.
	 *
	 * Unless otherwise noted a field can safely be assumed to be protected
	 * by strong reference counting.
	 */
	union {
		struct kref ref; /* no kref_get_unless_zero()! */
		struct rcu_head rcu;
	};

	struct intel_engine_cs *engine;
	struct intel_engine_cs *inflight;
#define __intel_context_inflight(engine)	ptr_mask_bits(engine, 3)
#define __intel_context_inflight_count(engine)	ptr_unmask_bits(engine, 3)
#define intel_context_inflight(ce) \
	__intel_context_inflight(READ_ONCE((ce)->inflight))
#define intel_context_inflight_count(ce) \
	__intel_context_inflight_count(READ_ONCE((ce)->inflight))
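
	/*
	 * The inflight pointer packs a small submission count into the low
	 * 3 bits of the engine pointer (struct intel_engine_cs is
	 * sufficiently aligned for that), so a single word records both
	 * where the context is executing and how many times it has been
	 * submitted there. Illustrative use of the accessors above (sketch
	 * only):
	 *
	 *	struct intel_engine_cs *engine = intel_context_inflight(ce);
	 *	unsigned int count = intel_context_inflight_count(ce);
	 *	// engine is NULL while the context is not in flight
	 */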

	struct i915_address_space *vm;
	struct i915_gem_context __rcu *gem_context;

	struct file *default_state;

	/*
	 * @signal_lock protects the list of requests that need signaling,
	 * @signals. While there are any requests that need signaling,
	 * we add the context to the breadcrumbs worker, and remove it
	 * upon completion/cancellation of the last request.
	 */
	struct list_head signal_link; /* Accessed under RCU */
	struct list_head signals; /* Guarded by signal_lock */
	spinlock_t signal_lock; /* protects signals, the list of requests */

	struct i915_vma *state;
	u32 ring_size;
	struct intel_ring *ring;
	struct intel_timeline *timeline;
	intel_wakeref_t wakeref;

	unsigned long flags;
#define CONTEXT_BARRIER_BIT		0
#define CONTEXT_ALLOC_BIT		1
#define CONTEXT_INIT_BIT		2
#define CONTEXT_VALID_BIT		3
#define CONTEXT_CLOSED_BIT		4
#define CONTEXT_USE_SEMAPHORES		5
#define CONTEXT_BANNED			6
#define CONTEXT_FORCE_SINGLE_SUBMISSION	7
#define CONTEXT_NOPREEMPT		8
#define CONTEXT_LRCA_DIRTY		9
#define CONTEXT_GUC_INIT		10
#define CONTEXT_PERMA_PIN		11
#define CONTEXT_IS_PARKING		12
#define CONTEXT_EXITING			13
#define CONTEXT_LOW_LATENCY		14
#define CONTEXT_OWN_STATE		15

	struct {
		u64 timeout_us;
	} watchdog;

	u32 *lrc_reg_state;
	union {
		struct {
			u32 lrca;
			u32 ccid;
		};
		u64 desc;
	} lrc;
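	/*
	 * The @lrc union above lets the backend either fill @lrca and @ccid
	 * individually or program the whole 64-bit @desc at once; on
	 * little-endian builds @lrca aliases the low 32 bits of @desc and
	 * @ccid the high 32 bits (layout as implied by the union, noted here
	 * for illustration only).
	 */
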
	u32 tag; /* cookie passed to HW to track this context on submission */

	/** stats: Context GPU engine busyness tracking. */
	struct intel_context_stats {
		u64 active;

		/* Time on GPU as tracked by the hw. */
		struct {
			struct ewma_runtime avg;
			u64 total;
			u32 last;
			I915_SELFTEST_DECLARE(u32 num_underflow);
			I915_SELFTEST_DECLARE(u32 max_underflow);
		} runtime;
	} stats;

	unsigned int active_count; /* protected by timeline->mutex */

	atomic_t pin_count;
	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */

	/**
	 * active: Active tracker for the rq activity (inc. external) on this
	 * intel_context object.
	 */
	struct i915_active active;

	const struct intel_context_ops *ops;

	/** sseu: Control eu/slice partitioning */
	struct intel_sseu sseu;

	/**
	 * pinned_contexts_link: List link for the engine's pinned contexts.
	 * This is only used if this is a perma-pinned kernel context, and
	 * the list is assumed to only be manipulated during driver load
	 * or unload, so there is currently no mutex protection.
	 */
	struct list_head pinned_contexts_link;

	u8 wa_bb_page; /* if set, page num reserved for context workarounds */

	struct {
		/** @lock: protects everything in guc_state */
		spinlock_t lock;
		/**
		 * @sched_state: scheduling state of this context using GuC
		 * submission
		 */
		u32 sched_state;
		/**
		 * @fences: maintains a list of requests that are currently
		 * being fenced until a GuC operation completes
		 */
		struct list_head fences;
		/**
		 * @blocked: fence used to signal when the blocking of a
		 * context's submissions is complete.
		 */
		struct i915_sw_fence blocked;
		/** @requests: list of active requests on this context */
		struct list_head requests;
		/** @prio: the context's current guc priority */
		u8 prio;
		/**
		 * @prio_count: a counter of the number of requests in flight
		 * in each priority bucket
		 */
		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
		/**
		 * @sched_disable_delay_work: worker to disable scheduling on
		 * this context
		 */
		struct delayed_work sched_disable_delay_work;
	} guc_state;

	struct {
		/**
		 * @id: handle which is used to uniquely identify this context
		 * with the GuC, protected by guc->submission_state.lock
		 */
		u16 id;
		/**
		 * @ref: the number of references to the guc_id, when
		 * transitioning in and out of zero protected by
		 * guc->submission_state.lock
		 */
		atomic_t ref;
		/**
		 * @link: in guc->guc_id_list when the guc_id has no refs but is
		 * still valid, protected by guc->submission_state.lock
		 */
		struct list_head link;
	} guc_id;

	/**
	 * @destroyed_link: link in guc->submission_state.destroyed_contexts, in
	 * list when context is pending to be destroyed (deregistered with the
	 * GuC), protected by guc->submission_state.lock
	 */
	struct list_head destroyed_link;

	/** @parallel: sub-structure for parallel submission members */
	struct {
		union {
			/**
			 * @child_list: parent's list of children
			 * contexts, no protection as immutable after context
			 * creation
			 */
			struct list_head child_list;
			/**
			 * @child_link: child's link into parent's list of
			 * children
			 */
			struct list_head child_link;
		};
		/** @parent: pointer to parent if child */
		struct intel_context *parent;
		/**
		 * @last_rq: last request submitted on a parallel context, used
		 * to insert submit fences between requests in the parallel
		 * context
		 */
		struct i915_request *last_rq;
		/**
		 * @fence_context: fence context of the composite fence used
		 * when doing parallel submission
		 */
		u64 fence_context;
		/**
		 * @seqno: seqno for composite fence when doing parallel
		 * submission
		 */
		u32 seqno;
		/** @number_children: number of children if parent */
		u8 number_children;
		/** @child_index: index into child_list if child */
		u8 child_index;
		/** @guc: GuC specific members for parallel submission */
		struct {
			/** @wqi_head: cached head pointer in work queue */
			u16 wqi_head;
			/** @wqi_tail: cached tail pointer in work queue */
			u16 wqi_tail;
			/** @wq_head: pointer to the actual head in work queue */
			u32 *wq_head;
			/** @wq_tail: pointer to the actual tail in work queue */
			u32 *wq_tail;
			/** @wq_status: pointer to the status in work queue */
			u32 *wq_status;

			/**
			 * @parent_page: page in context state (ce->state) used
			 * by parent for work queue, process descriptor
			 */
			u8 parent_page;
		} guc;
	} parallel;
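
	/*
	 * Parent/child bookkeeping for parallel submission: a parent links its
	 * children through parallel.child_list, while each child points back
	 * via parallel.parent and chains itself on parallel.child_link (the
	 * union is safe because a context is either a parent or a child,
	 * never both). Illustrative walk over the children with the plain
	 * list API (sketch only; intel_context.h provides dedicated helpers,
	 * and do_something() is just a placeholder):
	 *
	 *	struct intel_context *child;
	 *
	 *	list_for_each_entry(child, &parent->parallel.child_list,
	 *			    parallel.child_link)
	 *		do_something(child);
	 */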

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
	 */
	bool drop_schedule_enable;

	/**
	 * @drop_schedule_disable: Force drop of schedule disable G2H for
	 * selftest
	 */
	bool drop_schedule_disable;

	/**
	 * @drop_deregister: Force drop of deregister G2H for selftest
	 */
	bool drop_deregister;
#endif
};

#endif /* __INTEL_CONTEXT_TYPES__ */