/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_trace.h"

#define AMDGPU_IB_TEST_TIMEOUT		msecs_to_jiffies(1000)
#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT	msecs_to_jiffies(2000)
/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * commands are stored. You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them. Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
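
/*
 * A minimal sketch of the IB lifecycle, modeled on the ring IB tests
 * (hypothetical sizes and packets; real userspace submissions go through
 * the CS/job paths instead of calling these helpers directly):
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *	int r;
 *
 *	// suballocate 256 dwords from the delayed pool
 *	r = amdgpu_ib_get(adev, NULL, 256 * 4, AMDGPU_IB_POOL_DELAYED, &ib);
 *	if (r)
 *		return r;
 *
 *	ib.ptr[0] = ...;	// write packets through the CPU mapping
 *	ib.length_dw = ...;	// number of valid dwords
 *
 *	// place the IB on the ring; f receives the submission fence
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *
 *	// the suballocation is reclaimed once f signals
 *	amdgpu_ib_free(&ib, f);
 *	dma_fence_put(f);
 */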

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer
 * @size: requested IB size
 * @pool_type: IB pool type (delayed, immediate, direct)
 * @ib: IB object returned
 *
 * Request an IB (all asics). IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned int size, enum amdgpu_ib_pool_type pool_type,
		  struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
				     &ib->sa_bo, size);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
		/* flush the cache before committing the IB */
		ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @ib: IB object to free
 * @f: the fence the SA bo needs to wait on before the IB allocation can be reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f)
{
	amdgpu_sa_bo_free(&ib->sa_bo, f);
}

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are associated with
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job to schedule
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine). Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed. To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE. If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	struct amdgpu_fence *af;
	bool need_ctx_switch;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned int fence_flags = 0;
	bool secure, init_shadow;
	u64 shadow_va, csa_va, gds_va;
	int vmid = AMDGPU_JOB_GET_VMID(job);
	bool need_pipe_sync = false;
	unsigned int cond_exec;
	unsigned int i;
	int r = 0;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->base.s_fence ?
			job->base.s_fence->finished.context : 0;
		shadow_va = job->shadow_va;
		csa_va = job->csa_va;
		gds_va = job->gds_va;
		init_shadow = job->init_shadow;
		af = job->hw_fence;
		/* Save the context of the job for reset handling.
		 * The driver needs this so it can skip the ring
		 * contents for guilty contexts.
		 */
		af->context = fence_ctx;
		/* the vm fence is also part of the job's context */
		job->hw_vm_fence->context = fence_ctx;
	} else {
		vm = NULL;
		fence_ctx = 0;
		shadow_va = 0;
		csa_va = 0;
		gds_va = 0;
		init_shadow = false;
		af = kzalloc(sizeof(*af), GFP_ATOMIC);
		if (!af)
			return -ENOMEM;
	}

	if (!ring->sched.ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		r = -EINVAL;
		goto free_fence;
	}

	if (vm && !job->vmid) {
		dev_err(adev->dev, "VM IB without ID\n");
		r = -EINVAL;
		goto free_fence;
	}

	if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
	    (!ring->funcs->secure_submission_supported)) {
		dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name);
		r = -EINVAL;
		goto free_fence;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		goto free_fence;
	}

	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
	     need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {

		need_pipe_sync = true;

		if (tmp)
			trace_amdgpu_ib_pipe_sync(job, tmp);

		dma_fence_put(tmp);
	}

	if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
		ring->funcs->emit_mem_sync(ring);

	if (ring->funcs->emit_wave_limit &&
	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
		ring->funcs->emit_wave_limit(ring, true);

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	if (job) {
		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

	amdgpu_ring_ib_begin(ring);

	if (ring->funcs->emit_gfx_shadow && adev->gfx.cp_gfx_shadow)
		amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va,
					    init_shadow, vmid);

	if (ring->funcs->init_cond_exec)
		cond_exec = amdgpu_ring_init_cond_exec(ring,
						       ring->cond_exe_gpu_addr);

	amdgpu_device_flush_hdp(adev, ring);

	if (need_ctx_switch)
		status |= AMDGPU_HAVE_CTX_SWITCH;

	if (job && ring->funcs->emit_cntxcntl) {
		status |= job->preamble_status;
		status |= job->preemption_status;
		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	/* Set up the initial TMZiness and send it off */
	secure = false;
	if (job && ring->funcs->emit_frame_cntl) {
		secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
		amdgpu_ring_emit_frame_cntl(ring, true, secure);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		if (job && ring->funcs->emit_frame_cntl) {
			if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
				amdgpu_ring_emit_frame_cntl(ring, false, secure);
				secure = !secure;
				amdgpu_ring_emit_frame_cntl(ring, true, secure);
			}
		}

		amdgpu_ring_emit_ib(ring, job, ib, status);
		status &= ~AMDGPU_HAVE_CTX_SWITCH;
	}

	if (job && ring->funcs->emit_frame_cntl)
		amdgpu_ring_emit_frame_cntl(ring, false, secure);

	amdgpu_device_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* wrap the last IB with fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec &&
	    adev->gfx.cp_gfx_shadow) {
		amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);
		amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
	}
	r = amdgpu_fence_emit(ring, af, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vmid)
			amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid);
		amdgpu_ring_undo(ring);
		goto free_fence;
	}
	*f = &af->base;
	/* get a ref for the job */
	if (job)
		dma_fence_get(*f);

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	amdgpu_ring_patch_cond_exec(ring, cond_exec);

	ring->current_ctx = fence_ctx;
	if (job && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);

	if (ring->funcs->emit_wave_limit &&
	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
		ring->funcs->emit_wave_limit(ring, false);

	/* Save the wptr associated with this fence.
	 * This must be last for resets to work properly,
	 * as we need to know which ring contents to back up
	 * after we reset the queue.
	 */
	amdgpu_fence_save_wptr(af);

	amdgpu_ring_ib_end(ring);
	amdgpu_ring_commit(ring);

	return 0;

free_fence:
	if (!job)
		kfree(af);
	return r;
}
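
/*
 * An illustrative sketch of the SI-style dual-IB submission described in
 * the kernel-doc above (hypothetical setup; in practice the ordering is
 * produced by the CS ioctl path, not by calling this helper directly).
 * The CONST_IB is placed first so the CE can prime the caches before the
 * DE processes the draw:
 *
 *	struct amdgpu_ib ibs[2];
 *	struct dma_fence *f = NULL;
 *	int r;
 *
 *	ibs[0].flags = AMDGPU_IB_FLAG_CE;	// CONST_IB for the CE
 *	ibs[1].flags = 0;			// DE IB with state + draw
 *
 *	r = amdgpu_ib_schedule(ring, 2, ibs, job, &f);
 */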

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r, i;

	if (adev->ib_pool_ready)
		return 0;

	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
		r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
					      AMDGPU_IB_POOL_SIZE, 256,
					      AMDGPU_GEM_DOMAIN_GTT);
		if (r)
			goto error;
	}
	adev->ib_pool_ready = true;

	return 0;

error:
	while (i--)
		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
	return r;
}
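
/*
 * A note on pool selection, sketched from how the rest of the driver
 * uses these pools: DELAYED serves normal submissions to the top of the
 * pipeline, IMMEDIATE serves submissions to the bottom of the pipeline
 * such as page table updates, and DIRECT serves submissions written
 * directly to the ring buffer during init and reset. A hypothetical
 * caller picking the direct pool:
 *
 *	r = amdgpu_ib_get(adev, NULL, size, AMDGPU_IB_POOL_DIRECT, &ib);
 */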

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->ib_pool_ready)
		return;

	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
	adev->ib_pool_ready = false;
}

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	long tmo_gfx, tmo_mm;
	int r, ret = 0;
	unsigned int i;
	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* On the hypervisor side the MM engines are not scheduled
		 * together with the CP and SDMA engines, so even in exclusive
		 * mode an MM engine could still be running on another VF.
		 * The IB test timeout for the MM engines under SR-IOV should
		 * therefore be longer; 8 seconds should be enough for the MM
		 * engine to come back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* The CP and SDMA engines are scheduled together, so the
		 * timeout needs to be wide enough to cover the time spent
		 * waiting for them to come back under runtime only.
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	} else if (adev->gmc.xgmi.hive_id) {
		tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
	}

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		/* KIQ rings don't have an IB test because we never submit
		 * IBs to them and they have no interrupt support.
		 */
		if (!ring->sched.ready || !ring->funcs->test_ib)
			continue;

		if (adev->enable_mes &&
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		/* MM engines need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (!r) {
			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
				      ring->name);
			continue;
		}

		ring->sched.ready = false;
		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
			      ring->name, r);

		if (ring == &adev->gfx.gfx_ring[0]) {
			/* oh, oh, that's really bad */
			adev->accel_working = false;
			return r;

		} else {
			ret = r;
		}
	}
	return ret;
}
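
/*
 * A sketch of how this is typically used, assuming an init/resume path
 * that runs once all rings have been brought up (hypothetical caller):
 *
 *	r = amdgpu_ib_ring_tests(adev);
 *	if (r)
 *		dev_err(adev->dev, "IB ring tests failed (%d)\n", r);
 */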

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;

	seq_puts(m, "--------------------- DELAYED ---------------------\n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
				     m);
	seq_puts(m, "-------------------- IMMEDIATE --------------------\n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
				     m);
	seq_puts(m, "--------------------- DIRECT ----------------------\n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_sa_info);

#endif

void amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_sa_info", 0444, root, adev,
			    &amdgpu_debugfs_sa_info_fops);

#endif
}