// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/hardirq.h>
#include <linux/iosys-map.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>

#include <drm/drm_print.h>

#include "lima_devfreq.h"
#include "lima_drv.h"
#include "lima_sched.h"
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
#include "lima_gem.h"
#include "lima_trace.h"

struct lima_fence {
	struct dma_fence base;
	struct lima_sched_pipe *pipe;
};

static struct kmem_cache *lima_fence_slab;
static int lima_fence_slab_refcnt;

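/*
 * The fence slab is shared by all lima devices: refcount it so the
 * cache is created on first use and only destroyed when the last
 * user is gone.
 */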
int lima_sched_slab_init(void)
{
	if (!lima_fence_slab) {
		lima_fence_slab = kmem_cache_create(
			"lima_fence", sizeof(struct lima_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!lima_fence_slab)
			return -ENOMEM;
	}

	lima_fence_slab_refcnt++;
	return 0;
}

void lima_sched_slab_fini(void)
{
	if (!--lima_fence_slab_refcnt) {
		kmem_cache_destroy(lima_fence_slab);
		lima_fence_slab = NULL;
	}
}

static inline struct lima_fence *to_lima_fence(struct dma_fence *fence)
{
	return container_of(fence, struct lima_fence, base);
}

static const char *lima_fence_get_driver_name(struct dma_fence *fence)
{
	return "lima";
}

static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
{
	struct lima_fence *f = to_lima_fence(fence);

	return f->pipe->base.name;
}

static void lima_fence_release_rcu(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct lima_fence *fence = to_lima_fence(f);

	kmem_cache_free(lima_fence_slab, fence);
}

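/*
 * dma_fence readers may still hold an RCU-protected reference, so
 * defer freeing the fence memory until after a grace period via
 * call_rcu() instead of freeing it immediately.
 */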
static void lima_fence_release(struct dma_fence *fence)
{
	struct lima_fence *f = to_lima_fence(fence);

	call_rcu(&f->base.rcu, lima_fence_release_rcu);
}

static const struct dma_fence_ops lima_fence_ops = {
	.get_driver_name = lima_fence_get_driver_name,
	.get_timeline_name = lima_fence_get_timeline_name,
	.release = lima_fence_release,
};

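/*
 * Allocate a hardware fence on the pipe's fence context. fence_seqno
 * increases monotonically; run_job is serialized by the scheduler
 * (credit_limit is 1), so no extra locking of the seqno is needed.
 */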
static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
{
	struct lima_fence *fence;

	fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->pipe = pipe;
	dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
		       pipe->fence_context, ++pipe->fence_seqno);

	return fence;
}

static inline struct lima_sched_task *to_lima_task(struct drm_sched_job *job)
{
	return container_of(job, struct lima_sched_task, base);
}

static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
{
	return container_of(sched, struct lima_sched_pipe, base);
}

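/*
 * Duplicate the BO array and take a reference on each BO and on the
 * VM so they stay alive until lima_sched_task_fini(). The job is
 * armed here, so its scheduler fences are initialized before the
 * caller pushes it to the entity.
 */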
int lima_sched_task_init(struct lima_sched_task *task,
			 struct lima_sched_context *context,
			 struct lima_bo **bos, int num_bos,
			 struct lima_vm *vm,
			 u64 drm_client_id)
{
	int err, i;

	task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL);
	if (!task->bos)
		return -ENOMEM;

	for (i = 0; i < num_bos; i++)
		drm_gem_object_get(&bos[i]->base.base);

	err = drm_sched_job_init(&task->base, &context->base, 1, vm,
				 drm_client_id);
	if (err) {
		kfree(task->bos);
		return err;
	}

	drm_sched_job_arm(&task->base);

	task->num_bos = num_bos;
	task->vm = lima_vm_get(vm);

	return 0;
}

void lima_sched_task_fini(struct lima_sched_task *task)
{
	int i;

	drm_sched_job_cleanup(&task->base);

	if (task->bos) {
		for (i = 0; i < task->num_bos; i++)
			drm_gem_object_put(&task->bos[i]->base.base);
		kfree(task->bos);
	}

	lima_vm_put(task->vm);
}

int lima_sched_context_init(struct lima_sched_pipe *pipe,
			    struct lima_sched_context *context)
{
	struct drm_gpu_scheduler *sched = &pipe->base;

	return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, NULL);
}

void lima_sched_context_fini(struct lima_sched_pipe *pipe,
			     struct lima_sched_context *context)
{
	drm_sched_entity_destroy(&context->base);
}

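/*
 * Take a reference on the finished fence before pushing the job:
 * once pushed, the job may complete and be freed at any time, so
 * the fence must be grabbed first to be returned to the caller.
 */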
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
{
	struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);

	trace_lima_task_submit(task);
	drm_sched_entity_push_job(&task->base);
	return fence;
}

static int lima_pm_busy(struct lima_device *ldev)
{
	int ret;

	/* resume GPU if it has been suspended by runtime PM */
	ret = pm_runtime_resume_and_get(ldev->dev);
	if (ret < 0)
		return ret;

	lima_devfreq_record_busy(&ldev->devfreq);
	return 0;
}

static void lima_pm_idle(struct lima_device *ldev)
{
	lima_devfreq_record_idle(&ldev->devfreq);

	/* GPU can do auto runtime suspend */
	pm_runtime_mark_last_busy(ldev->dev);
	pm_runtime_put_autosuspend(ldev->dev);
}

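/*
 * Scheduler run_job callback: resume the GPU, flush the L2 caches,
 * switch the MMU(s) to the task's VM and kick the hardware. The
 * returned fence is signaled from the IRQ path once the task is done.
 */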
static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_device *ldev = pipe->ldev;
	struct lima_fence *fence;
	int i, err;

	/* after GPU reset */
	if (job->s_fence->finished.error < 0)
		return NULL;

	fence = lima_fence_create(pipe);
	if (!fence)
		return NULL;

	err = lima_pm_busy(ldev);
	if (err < 0) {
		dma_fence_put(&fence->base);
		return NULL;
	}

	task->fence = &fence->base;

	/* hold an extra reference for the caller, otherwise the irq
	 * handler may release the fence before the caller uses it
	 */
	dma_fence_get(task->fence);

	pipe->current_task = task;

	/* this is needed for the MMU to work correctly, otherwise GP/PP
	 * will hang or page fault for unknown reasons after running for
	 * a while.
	 *
	 * Need to investigate:
	 * 1. is it related to the TLB?
	 * 2. how much performance is affected by the L2 cache flush?
	 * 3. can we reduce the calls to this function because all
	 *    GP/PP share the same L2 cache on mali400?
	 *
	 * TODO:
	 * 1. move this to task fini to save some wait time?
	 * 2. when GP/PP use different L2 caches, does PP need to wait
	 *    for the GP L2 cache flush?
	 */
	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	lima_vm_put(pipe->current_vm);
	pipe->current_vm = lima_vm_get(task->vm);

	if (pipe->bcast_mmu)
		lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
	else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
	}

	trace_lima_task_run(task);

	pipe->error = false;
	pipe->task_run(pipe, task);

	return task->fence;
}

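/*
 * Snapshot the state of a faulting task (frame registers, process
 * name, pid and the contents of all task BOs) into the device's
 * error task list so it can be dumped for debugging later. The list
 * is capped at lima_max_error_tasks entries.
 */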
static void lima_sched_build_error_task_list(struct lima_sched_task *task)
{
	struct lima_sched_error_task *et;
	struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
	struct lima_ip *ip = pipe->processor[0];
	int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp;
	struct lima_device *dev = ip->dev;
	struct lima_sched_context *sched_ctx =
		container_of(task->base.entity,
			     struct lima_sched_context, base);
	struct lima_ctx *ctx =
		container_of(sched_ctx, struct lima_ctx, context[pipe_id]);
	struct lima_dump_task *dt;
	struct lima_dump_chunk *chunk;
	struct lima_dump_chunk_pid *pid_chunk;
	struct lima_dump_chunk_buffer *buffer_chunk;
	u32 size, task_size, mem_size;
	int i;
	struct iosys_map map;
	int ret;

	mutex_lock(&dev->error_task_list_lock);

	if (dev->dump.num_tasks >= lima_max_error_tasks) {
		dev_info(dev->dev, "fail to save task state from %s pid %d: error task list is full\n",
			 ctx->pname, ctx->pid);
		goto out;
	}

	/* frame chunk */
	size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
	/* process name chunk */
	size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname);
	/* pid chunk */
	size += sizeof(struct lima_dump_chunk);
	/* buffer chunks */
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];

		size += sizeof(struct lima_dump_chunk);
		size += bo->heap_size ? bo->heap_size : lima_bo_size(bo);
	}

	task_size = size + sizeof(struct lima_dump_task);
	mem_size = task_size + sizeof(*et);
	et = kvmalloc(mem_size, GFP_KERNEL);
	if (!et) {
		dev_err(dev->dev, "fail to alloc task dump buffer of size %x\n",
			mem_size);
		goto out;
	}

	et->data = et + 1;
	et->size = task_size;

	dt = et->data;
	memset(dt, 0, sizeof(*dt));
	dt->id = pipe_id;
	dt->size = size;

	chunk = (struct lima_dump_chunk *)(dt + 1);
	memset(chunk, 0, sizeof(*chunk));
	chunk->id = LIMA_DUMP_CHUNK_FRAME;
	chunk->size = pipe->frame_size;
	memcpy(chunk + 1, task->frame, pipe->frame_size);
	dt->num_chunks++;

	chunk = (void *)(chunk + 1) + chunk->size;
	memset(chunk, 0, sizeof(*chunk));
	chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME;
	chunk->size = sizeof(ctx->pname);
	memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname));
	dt->num_chunks++;

	pid_chunk = (void *)(chunk + 1) + chunk->size;
	memset(pid_chunk, 0, sizeof(*pid_chunk));
	pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID;
	pid_chunk->pid = ctx->pid;
	dt->num_chunks++;

	buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size;
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];
		void *data;

		memset(buffer_chunk, 0, sizeof(*buffer_chunk));
		buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER;
		buffer_chunk->va = lima_vm_get_va(task->vm, bo);

		if (bo->heap_size) {
			buffer_chunk->size = bo->heap_size;

			data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT,
				    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
			if (!data) {
				kvfree(et);
				goto out;
			}

			memcpy(buffer_chunk + 1, data, buffer_chunk->size);

			vunmap(data);
		} else {
			buffer_chunk->size = lima_bo_size(bo);

			ret = drm_gem_vmap(&bo->base.base, &map);
			if (ret) {
				kvfree(et);
				goto out;
			}

			memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);

			drm_gem_vunmap(&bo->base.base, &map);
		}

		buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
		dt->num_chunks++;
	}

	list_add(&et->list, &dev->error_task_list);
	dev->dump.size += et->size;
	dev->dump.num_tasks++;

	dev_info(dev->dev, "save error task state success\n");

out:
	mutex_unlock(&dev->error_task_list_lock);
}

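/*
 * Timeout handler: after ruling out spurious timeouts caused by IRQ
 * latency, stop the scheduler, record the error task dump, hard
 * reset the processors and MMUs, then resubmit the remaining jobs
 * and restart the scheduler.
 */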
static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job)
{
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_device *ldev = pipe->ldev;
	struct lima_ip *ip = pipe->processor[0];
	int i;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(task->fence)) {
		DRM_WARN("%s spurious timeout\n", lima_ip_name(ip));
		return DRM_GPU_SCHED_STAT_RESET;
	}

	/*
	 * The Lima IRQ handler may take a long time to process an interrupt
	 * if another IRQ handler is hogging the processing.
	 * In order to catch such cases and not report spurious Lima job
	 * timeouts, synchronize the IRQ handler and re-check the fence
	 * status.
	 */
	for (i = 0; i < pipe->num_processor; i++)
		synchronize_irq(pipe->processor[i]->irq);
	if (pipe->bcast_processor)
		synchronize_irq(pipe->bcast_processor->irq);

	if (dma_fence_is_signaled(task->fence)) {
		DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
		return DRM_GPU_SCHED_STAT_RESET;
	}

	/*
	 * The task might still finish while this timeout handler runs.
	 * To prevent a race condition on its completion, mask all irqs
	 * on the running core until the next hard reset completes.
	 */
	pipe->task_mask_irq(pipe);

	if (!pipe->error)
		DRM_ERROR("%s job timeout\n", lima_ip_name(ip));

	drm_sched_stop(&pipe->base, &task->base);

	drm_sched_increase_karma(&task->base);

	if (lima_max_error_tasks)
		lima_sched_build_error_task_list(task);

	pipe->task_error(pipe);

	if (pipe->bcast_mmu)
		lima_mmu_page_fault_resume(pipe->bcast_mmu);
	else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_page_fault_resume(pipe->mmu[i]);
	}

	lima_vm_put(pipe->current_vm);
	pipe->current_vm = NULL;
	pipe->current_task = NULL;

	lima_pm_idle(ldev);

	drm_sched_resubmit_jobs(&pipe->base);
	drm_sched_start(&pipe->base, 0);

	return DRM_GPU_SCHED_STAT_RESET;
}

static void lima_sched_free_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_vm *vm = task->vm;
	struct lima_bo **bos = task->bos;
	int i;

	dma_fence_put(task->fence);

	for (i = 0; i < task->num_bos; i++)
		lima_vm_bo_del(vm, bos[i]);

	lima_sched_task_fini(task);
	kmem_cache_free(pipe->task_slab, task);
}

static const struct drm_sched_backend_ops lima_sched_ops = {
	.run_job = lima_sched_run_job,
	.timedout_job = lima_sched_timedout_job,
	.free_job = lima_sched_free_job,
};

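/*
 * Worker scheduled from lima_sched_pipe_task_done() when the current
 * task hit a recoverable error: flush the L2 caches and MMU TLBs and
 * let the pipe retry the task. If recovery fails, report a fault to
 * the scheduler so the normal timeout/reset path runs.
 */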
static void lima_sched_recover_work(struct work_struct *work)
{
	struct lima_sched_pipe *pipe =
		container_of(work, struct lima_sched_pipe, recover_work);
	int i;

	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	if (pipe->bcast_mmu) {
		lima_mmu_flush_tlb(pipe->bcast_mmu);
	} else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_flush_tlb(pipe->mmu[i]);
	}

	if (pipe->task_recover(pipe))
		drm_sched_fault(&pipe->base);
}

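/*
 * Use lima_sched_timeout_ms as the job timeout when set (> 0),
 * falling back to 10 seconds. credit_limit is 1, so the scheduler
 * keeps at most one job on the hardware per pipe at a time.
 */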
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
	unsigned int timeout = lima_sched_timeout_ms > 0 ?
			       lima_sched_timeout_ms : 10000;
	const struct drm_sched_init_args args = {
		.ops = &lima_sched_ops,
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.credit_limit = 1,
		.hang_limit = lima_job_hang_limit,
		.timeout = msecs_to_jiffies(timeout),
		.name = name,
		.dev = pipe->ldev->dev,
	};

	pipe->fence_context = dma_fence_context_alloc(1);
	spin_lock_init(&pipe->fence_lock);

	INIT_WORK(&pipe->recover_work, lima_sched_recover_work);

	return drm_sched_init(&pipe->base, &args);
}

void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
{
	drm_sched_fini(&pipe->base);
}

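/*
 * Called from the IRQ path when the hardware finishes or fails the
 * current task: either signal the task's fence and drop the runtime
 * PM reference, or kick off recovery / scheduler fault handling.
 */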
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
	struct lima_sched_task *task = pipe->current_task;
	struct lima_device *ldev = pipe->ldev;

	if (pipe->error) {
		if (task && task->recoverable)
			schedule_work(&pipe->recover_work);
		else
			drm_sched_fault(&pipe->base);
	} else {
		pipe->task_fini(pipe);
		dma_fence_signal(task->fence);

		lima_pm_idle(ldev);
	}
}
| 562 | |