/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "sched_internal.h"

#include "gpu_scheduler_trace.h"
/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to the HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found guilty of causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
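 *
 * A minimal usage sketch, assuming a single scheduler embedded in a
 * hypothetical driver structure ("mydev" is not part of this API):
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &mydev->sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&mydev->entity,
 *				    DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list),
 *				    NULL);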
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->last_user = current->group_leader;
	/*
	 * It's perfectly valid to initialize an entity without having a valid
	 * scheduler attached. It's just not valid to use the scheduler before it
	 * is initialized itself.
	 */
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list && !sched_list[0]->sched_rq) {
		/* Every entry covered by num_sched_list should be
		 * non-NULL, so warn drivers not to do this and to fix
		 * their DRM calling order.
		 */
		pr_warn("%s: called with uninitialized scheduler\n", __func__);
	} else if (num_sched_list) {
		/* The "priority" of an entity cannot exceed the number of
		 * run-queues of a scheduler. Protect against num_rqs being 0
		 * by converting to signed. Choose the lowest priority
		 * available.
		 */
		if (entity->priority >= sched_list[0]->num_rqs) {
			dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n",
				entity->priority, sched_list[0]->num_rqs);
			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
						 (s32) DRM_SCHED_PRIORITY_KERNEL);
		}
		entity->rq = sched_list[0]->sched_rq[entity->priority];
	}

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

/**
 * drm_sched_entity_modify_sched - Modify the scheds of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	spin_lock(&entity->lock);
	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
	spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);

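/*
 * Check whether the entity is idle, that is, no longer on the scheduler's
 * entity list, without queued jobs, or stopped.
 */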
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. The result
 * can change any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);

static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb);

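/*
 * Work item which, for a job of a killed entity, waits for all remaining
 * dependencies, then signals the job's scheduler fences with -ESRCH and
 * frees the job.
 */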
static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
	struct dma_fence *f;
	unsigned long index;

	/* Wait for all dependencies to avoid data corruptions */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		dma_fence_put(f);
	}

	drm_sched_fence_scheduled(job->s_fence, NULL);
	drm_sched_fence_finished(job->s_fence, -ESRCH);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	dma_fence_put(f);

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);
	while ((job = drm_sched_entity_queue_pop(entity))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		if (!prev ||
		    dma_fence_add_callback(prev, &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb)) {
			/*
			 * Adding callback above failed.
			 * dma_fence_put() checks for NULL.
			 */
			dma_fence_put(prev);
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		}

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait in jiffies for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions. This first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more jobs during this fini - consume
	 * existing queued ones, or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process disallow further enqueueing of jobs. */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if (last_user == current->group_leader &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If jobs are potentially still in flight or getting newly queued,
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing jobs wasn't completed, forcefully remove
	 * them. This also makes sure that the scheduler won't touch this
	 * entity any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->lock);
	entity->priority = priority;
	spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity,
					       struct drm_sched_job *sched_job)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (!fence->error && s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
	}

	if (trace_drm_sched_job_unschedulable_enabled() &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &entity->dependency->flags))
		trace_drm_sched_job_unschedulable(sched_job, entity->dependency);

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

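/*
 * Return the next dependency fence of @job the caller needs to wait on, or
 * NULL when the job is ready to run. Fences stashed in the job's
 * dependencies xarray are handed out first, then the driver's optional
 * prepare_job() callback is consulted.
 */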
static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	struct dma_fence *f;

	/* We keep the fence around, so we can iterate over all dependencies
	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
	 * before killing the job.
	 */
	f = xa_load(&job->dependencies, job->last_dependency);
	if (f) {
		job->last_dependency++;
		return dma_fence_get(f);
	}

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}

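/*
 * Pop the next ready job from the entity's queue. Returns NULL when the
 * queue is empty or when an unsignaled dependency blocks the head job, in
 * which case a wakeup callback is installed instead.
 */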
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = drm_sched_entity_queue_peek(entity);
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		if (drm_sched_entity_add_dependency_cb(entity, sched_job))
			return NULL;
	}

	/* skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	rcu_assign_pointer(entity->last_scheduled,
			   dma_fence_get(&sched_job->s_fence->finished));

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we have a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = drm_sched_entity_queue_peek(entity);
		if (next) {
			struct drm_sched_rq *rq;

			spin_lock(&entity->lock);
			rq = entity->rq;
			spin_lock(&rq->lock);
			drm_sched_rq_update_fifo_locked(entity, rq,
							next->submit_ts);
			spin_unlock(&rq->lock);
			spin_unlock(&entity->lock);
		}
	}

	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entity's queue, set the job's entity
	 * pointer to NULL to prevent any future access of the entity through
	 * this job.
	 */
	sched_job->entity = NULL;

	return sched_job;
}

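/*
 * (Re)select the run queue for the entity: ask drm_sched_pick_best() for a
 * scheduler from entity->sched_list, but only while the job queue is idle
 * and the previously scheduled job has finished; otherwise the entity stays
 * on its current run queue.
 */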
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that
	 * drm_sched_run_job_work() cannot change entity->last_scheduled. To
	 * enforce ordering we need a read barrier here. See
	 * drm_sched_entity_pop_job() for the other side.
	 */
	smp_rmb();

	fence = rcu_dereference_check(entity->last_scheduled, true);

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;

	spin_unlock(&entity->lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to the queue matches the
 * job's fence sequence number, this function should be called together with
 * drm_sched_job_arm() under a common lock for the struct drm_sched_entity
 * that was set up for @sched_job in drm_sched_job_init().
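 *
 * A minimal sketch of the arm/push pairing under a common lock ("submit_lock"
 * and "ctx" are hypothetical driver state, not part of this API):
 *
 *	mutex_lock(&ctx->submit_lock);
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 *	mutex_unlock(&ctx->submit_lock);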
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job_queue(sched_job, entity);

	if (trace_drm_sched_job_add_dep_enabled()) {
		struct dma_fence *entry;
		unsigned long index;

		xa_for_each(&sched_job->dependencies, index, entry)
			trace_drm_sched_job_add_dep(sched_job, entry);
	}
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		struct drm_gpu_scheduler *sched;
		struct drm_sched_rq *rq;

		/* Add the entity to the run queue */
		spin_lock(&entity->lock);
		if (entity->stopped) {
			spin_unlock(&entity->lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		rq = entity->rq;
		sched = rq->sched;

		spin_lock(&rq->lock);
		drm_sched_rq_add_entity(rq, entity);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);

		spin_unlock(&rq->lock);
		spin_unlock(&entity->lock);

		drm_sched_wakeup(sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);