/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"

/* Polaris10/11/12 firmware version */
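/* packed as (major << 24) | (minor << 16) | (revision << 8), i.e. 1.130.16 */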
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
                                          enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable);

/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
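        /*
         * ENC rings only work on Polaris-class parts with firmware
         * 1.130.16 or newer; a fw_version of 0 means the microcode has
         * not been loaded yet, so report support until it is known.
         */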
        return ((adev->asic_type >= CHIP_POLARIS10) &&
                (adev->asic_type <= CHIP_VEGAM) &&
                (!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->uvd.inst->ring_enc[0])
                return RREG32(mmUVD_RB_RPTR);
        else
                return RREG32(mmUVD_RB_RPTR2);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->uvd.inst->ring_enc[0])
                return RREG32(mmUVD_RB_WPTR);
        else
                return RREG32(mmUVD_RB_WPTR2);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->uvd.inst->ring_enc[0])
                WREG32(mmUVD_RB_WPTR,
                       lower_32_bits(ring->wptr));
        else
                WREG32(mmUVD_RB_WPTR2,
                       lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr;
        unsigned i;
        int r;

        r = amdgpu_ring_alloc(ring, 16);
        if (r)
                return r;

        rptr = amdgpu_ring_get_rptr(ring);

        amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                       struct amdgpu_bo *bo,
                                       struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
                                     AMDGPU_IB_POOL_DIRECT, &job,
                                     AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                return r;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);

        ib->length_dw = 0;
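        /* each command below is a size dword, an opcode dword, then payload */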
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00010000;
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = addr;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
                                        uint32_t handle,
                                        struct amdgpu_bo *bo,
                                        struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
                                     AMDGPU_IB_POOL_DIRECT, &job,
                                     AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                return r;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00010000;
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = addr;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
        long r;

        r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;

        r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

error:
        dma_fence_put(fence);
        return r;
}
358
359static int uvd_v6_0_early_init(struct amdgpu_ip_block *ip_block)
360{
361 struct amdgpu_device *adev = ip_block->adev;
362 adev->uvd.num_uvd_inst = 1;
363
364 if (!(adev->flags & AMD_IS_APU) &&
365 (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
366 return -ENOENT;
367
368 uvd_v6_0_set_ring_funcs(adev);
369
370 if (uvd_v6_0_enc_support(adev)) {
371 adev->uvd.num_enc_rings = 2;
372 uvd_v6_0_set_enc_ring_funcs(adev);
373 }
374
375 uvd_v6_0_set_irq_funcs(adev);
376
377 return 0;
378}
379
380static int uvd_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
381{
382 struct amdgpu_ring *ring;
383 int i, r;
384 struct amdgpu_device *adev = ip_block->adev;
385
386 /* UVD TRAP */
387 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, source: &adev->uvd.inst->irq);
388 if (r)
389 return r;
390
391 /* UVD ENC TRAP */
392 if (uvd_v6_0_enc_support(adev)) {
393 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
394 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, src_id: i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, source: &adev->uvd.inst->irq);
395 if (r)
396 return r;
397 }
398 }
399
400 r = amdgpu_uvd_sw_init(adev);
401 if (r)
402 return r;
403
404 if (!uvd_v6_0_enc_support(adev)) {
405 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
406 adev->uvd.inst->ring_enc[i].funcs = NULL;
407
408 adev->uvd.inst->irq.num_types = 1;
409 adev->uvd.num_enc_rings = 0;
410
411 DRM_INFO("UVD ENC is disabled\n");
412 }
413
414 ring = &adev->uvd.inst->ring;
415 sprintf(buf: ring->name, fmt: "uvd");
416 r = amdgpu_ring_init(adev, ring, max_dw: 512, irq_src: &adev->uvd.inst->irq, irq_type: 0,
417 hw_prio: AMDGPU_RING_PRIO_DEFAULT, NULL);
418 if (r)
419 return r;
420
421 r = amdgpu_uvd_resume(adev);
422 if (r)
423 return r;
424
425 if (uvd_v6_0_enc_support(adev)) {
426 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
427 ring = &adev->uvd.inst->ring_enc[i];
428 sprintf(buf: ring->name, fmt: "uvd_enc%d", i);
429 r = amdgpu_ring_init(adev, ring, max_dw: 512,
430 irq_src: &adev->uvd.inst->irq, irq_type: 0,
431 hw_prio: AMDGPU_RING_PRIO_DEFAULT, NULL);
432 if (r)
433 return r;
434 }
435 }
436
437 return r;
438}
439
440static int uvd_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
441{
442 int i, r;
443 struct amdgpu_device *adev = ip_block->adev;
444
445 r = amdgpu_uvd_suspend(adev);
446 if (r)
447 return r;
448
449 if (uvd_v6_0_enc_support(adev)) {
450 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
451 amdgpu_ring_fini(ring: &adev->uvd.inst->ring_enc[i]);
452 }
453
454 return amdgpu_uvd_sw_fini(adev);
455}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t tmp;
        int i, r;

        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
        uvd_v6_0_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE);
        uvd_v6_0_enable_mgcg(adev, true);

        r = amdgpu_ring_test_helper(ring);
        if (r)
                goto done;

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }

        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

        if (uvd_v6_0_enc_support(adev)) {
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
                        ring = &adev->uvd.inst->ring_enc[i];
                        r = amdgpu_ring_test_helper(ring);
                        if (r)
                                goto done;
                }
        }

done:
        if (!r) {
                if (uvd_v6_0_enc_support(adev))
                        DRM_INFO("UVD and UVD ENC initialized successfully.\n");
                else
                        DRM_INFO("UVD initialized successfully.\n");
        }

        return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        cancel_delayed_work_sync(&adev->uvd.idle_work);

        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v6_0_stop(adev);

        return 0;
}

static int uvd_v6_0_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        return amdgpu_uvd_prepare_suspend(adev);
}

static int uvd_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
        int r;
        struct amdgpu_device *adev = ip_block->adev;

        /*
         * Proper cleanups before halting the HW engine:
         * - cancel the delayed idle work
         * - enable powergating
         * - enable clockgating
         * - disable dpm
         *
         * TODO: to align with the VCN implementation, move the
         * jobs for clockgating/powergating/dpm setting to
         * ->set_powergating_state().
         */
        cancel_delayed_work_sync(&adev->uvd.idle_work);

        if (adev->pm.dpm_enabled) {
                amdgpu_dpm_enable_uvd(adev, false);
        } else {
                amdgpu_asic_set_uvd_clocks(adev, 0, 0);
                /* shutdown the UVD block */
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                                       AMD_PG_STATE_GATE);
                amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                                       AMD_CG_STATE_GATE);
        }

        r = uvd_v6_0_hw_fini(ip_block);
        if (r)
                return r;

        return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
        int r;

        r = amdgpu_uvd_resume(ip_block->adev);
        if (r)
                return r;

        return uvd_v6_0_hw_init(ip_block);
}
/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
               lower_32_bits(adev->uvd.inst->gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
               upper_32_bits(adev->uvd.inst->gpu_addr));

        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE +
               (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

        WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
                                             bool enable)
{
        u32 data, data1;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        if (enable) {
                data |= UVD_CGC_GATE__SYS_MASK |
                        UVD_CGC_GATE__UDEC_MASK |
                        UVD_CGC_GATE__MPEG2_MASK |
                        UVD_CGC_GATE__RBC_MASK |
                        UVD_CGC_GATE__LMI_MC_MASK |
                        UVD_CGC_GATE__IDCT_MASK |
                        UVD_CGC_GATE__MPRD_MASK |
                        UVD_CGC_GATE__MPC_MASK |
                        UVD_CGC_GATE__LBSI_MASK |
                        UVD_CGC_GATE__LRBBM_MASK |
                        UVD_CGC_GATE__UDEC_RE_MASK |
                        UVD_CGC_GATE__UDEC_CM_MASK |
                        UVD_CGC_GATE__UDEC_IT_MASK |
                        UVD_CGC_GATE__UDEC_DB_MASK |
                        UVD_CGC_GATE__UDEC_MP_MASK |
                        UVD_CGC_GATE__WCB_MASK |
                        UVD_CGC_GATE__VCPU_MASK |
                        UVD_CGC_GATE__SCPU_MASK;
                data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
                         UVD_SUVD_CGC_GATE__SIT_MASK |
                         UVD_SUVD_CGC_GATE__SMP_MASK |
                         UVD_SUVD_CGC_GATE__SCM_MASK |
                         UVD_SUVD_CGC_GATE__SDB_MASK |
                         UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                         UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                         UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                         UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                         UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                         UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                         UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                         UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
        } else {
                data &= ~(UVD_CGC_GATE__SYS_MASK |
                          UVD_CGC_GATE__UDEC_MASK |
                          UVD_CGC_GATE__MPEG2_MASK |
                          UVD_CGC_GATE__RBC_MASK |
                          UVD_CGC_GATE__LMI_MC_MASK |
                          UVD_CGC_GATE__LMI_UMC_MASK |
                          UVD_CGC_GATE__IDCT_MASK |
                          UVD_CGC_GATE__MPRD_MASK |
                          UVD_CGC_GATE__MPC_MASK |
                          UVD_CGC_GATE__LBSI_MASK |
                          UVD_CGC_GATE__LRBBM_MASK |
                          UVD_CGC_GATE__UDEC_RE_MASK |
                          UVD_CGC_GATE__UDEC_CM_MASK |
                          UVD_CGC_GATE__UDEC_IT_MASK |
                          UVD_CGC_GATE__UDEC_DB_MASK |
                          UVD_CGC_GATE__UDEC_MP_MASK |
                          UVD_CGC_GATE__WCB_MASK |
                          UVD_CGC_GATE__VCPU_MASK |
                          UVD_CGC_GATE__SCPU_MASK);
                data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
                           UVD_SUVD_CGC_GATE__SIT_MASK |
                           UVD_SUVD_CGC_GATE__SMP_MASK |
                           UVD_SUVD_CGC_GATE__SCM_MASK |
                           UVD_SUVD_CGC_GATE__SDB_MASK |
                           UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                           UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                           UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                           UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                           UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                           UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                           UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                           UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
        }
        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;

        /* disable DPG */
        WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        uvd_v6_0_mc_resume(adev);

        /* disable interrupt */
        WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

        /* stall UMC and register bus before resetting VCPU */
        WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET,
               UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
               UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
               UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
               UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
               UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
               UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
               UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
               UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
        mdelay(5);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL,
               (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
               UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
               UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
               UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
               UVD_LMI_CTRL__REQ_MODE_MASK |
               UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

        /* enable UMC */
        WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);

        for (i = 0; i < 10; ++i) {
                uint32_t status;

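                /* UVD_STATUS bit 1 is the VCPU report flag; it flips on
                 * once the VCPU has booted
                 */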
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
                mdelay(10);
                WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }
        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN,
                 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
                 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

        /* clear the bit 4 of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

        /* force RBC into idle state */
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32(mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
               lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
               upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

        WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

        if (uvd_v6_0_enc_support(adev)) {
                ring = &adev->uvd.inst->ring_enc[0];
                WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
                WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
                WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
                WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
                WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

                ring = &adev->uvd.inst->ring_enc[1];
                WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
                WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
                WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
                WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
                WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
        }

        return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
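        /* bit 8 of UVD_LMI_CTRL2 is the STALL_ARB_UMC field, the same bit
         * uvd_v6_0_start() sets via WREG32_FIELD()
         */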
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

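        /* fence part: seq goes into CONTEXT_ID, the address into
         * DATA0/DATA1, then a GPCOM command (0)
         */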
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

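        /* trap part: zeroed DATA0/DATA1 with GPCOM command 2 */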
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
                                         u64 seq, unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
        amdgpu_ring_write(ring, addr);
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        /* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
                                  uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
        amdgpu_ring_write(ring, vmid);

        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
                                      struct amdgpu_job *job,
                                      struct amdgpu_ib *ib,
                                      uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
}

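/* emit a register write through the VCPU GPCOM registers: DATA0 takes the
 * register byte offset (dword offset << 2), DATA1 the value, and command
 * 0x8 commits the write
 */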
static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
                                    uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, val);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0x8);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                        unsigned vmid, uint64_t pd_addr)
{
        amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
        amdgpu_ring_write(ring, 1 << vmid); /* mask */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

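        /* GPCOM command 0xE: wait until the fence value at addr, masked with
         * GP_SCRATCH8 (0xffffffff here), matches seq (in GP_SCRATCH9)
         */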
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0xE);
}

static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        int i;

        WARN_ON(ring->wptr % 2 || count % 2);

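        /* each NOP is a PACKET0 header plus one payload dword, hence pairs */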
        for (i = 0; i < count / 2; i++) {
                amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
                amdgpu_ring_write(ring, 0);
        }
}

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                            unsigned int vmid, uint64_t pd_addr)
{
        amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, pd_addr >> 12);
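        /* UPDATE_PTB takes the page directory base as a 4 KiB frame number */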

        amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
        amdgpu_ring_write(ring, vmid);
}

static bool uvd_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
        unsigned i;
        struct amdgpu_device *adev = ip_block->adev;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (uvd_v6_0_is_idle(ip_block))
                        return 0;
        }
        return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        u32 srbm_soft_reset = 0;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
            REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
            (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
                                                SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

        if (srbm_soft_reset) {
                adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
                return true;
        } else {
                adev->uvd.inst->srbm_soft_reset = 0;
                return false;
        }
}

static int uvd_v6_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        if (!adev->uvd.inst->srbm_soft_reset)
                return 0;

        uvd_v6_0_stop(adev);
        return 0;
}

static int uvd_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        u32 srbm_soft_reset;

        if (!adev->uvd.inst->srbm_soft_reset)
                return 0;
        srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

        if (srbm_soft_reset) {
                u32 tmp;

                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);
        }

        return 0;
}

static int uvd_v6_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        if (!adev->uvd.inst->srbm_soft_reset)
                return 0;

        mdelay(5);

        return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        // TODO
        return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        bool int_handled = true;

        DRM_DEBUG("IH: UVD TRAP\n");

        switch (entry->src_id) {
        case 124:
                amdgpu_fence_process(&adev->uvd.inst->ring);
                break;
        case 119:
                if (likely(uvd_v6_0_enc_support(adev)))
                        amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
                else
                        int_handled = false;
                break;
        case 120:
                if (likely(uvd_v6_0_enc_support(adev)))
                        amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
                else
                        int_handled = false;
                break;
        }

        if (!int_handled)
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);

        return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
        uint32_t data1, data3;

        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        data3 = RREG32(mmUVD_CGC_GATE);

        data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
                 UVD_SUVD_CGC_GATE__SIT_MASK |
                 UVD_SUVD_CGC_GATE__SMP_MASK |
                 UVD_SUVD_CGC_GATE__SCM_MASK |
                 UVD_SUVD_CGC_GATE__SDB_MASK |
                 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

        if (enable) {
                data3 |= (UVD_CGC_GATE__SYS_MASK |
                          UVD_CGC_GATE__UDEC_MASK |
                          UVD_CGC_GATE__MPEG2_MASK |
                          UVD_CGC_GATE__RBC_MASK |
                          UVD_CGC_GATE__LMI_MC_MASK |
                          UVD_CGC_GATE__LMI_UMC_MASK |
                          UVD_CGC_GATE__IDCT_MASK |
                          UVD_CGC_GATE__MPRD_MASK |
                          UVD_CGC_GATE__MPC_MASK |
                          UVD_CGC_GATE__LBSI_MASK |
                          UVD_CGC_GATE__LRBBM_MASK |
                          UVD_CGC_GATE__UDEC_RE_MASK |
                          UVD_CGC_GATE__UDEC_CM_MASK |
                          UVD_CGC_GATE__UDEC_IT_MASK |
                          UVD_CGC_GATE__UDEC_DB_MASK |
                          UVD_CGC_GATE__UDEC_MP_MASK |
                          UVD_CGC_GATE__WCB_MASK |
                          UVD_CGC_GATE__JPEG_MASK |
                          UVD_CGC_GATE__SCPU_MASK |
                          UVD_CGC_GATE__JPEG2_MASK);
                /* only when PG is enabled can we gate the clock to the VCPU */
                if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
                        data3 |= UVD_CGC_GATE__VCPU_MASK;

                data3 &= ~UVD_CGC_GATE__REGS_MASK;
        } else {
                data3 = 0;
        }

        WREG32(mmUVD_SUVD_CGC_GATE, data1);
        WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data2;

        data = RREG32(mmUVD_CGC_CTRL);
        data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

        data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
                  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
                (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                  UVD_CGC_CTRL__SYS_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_MODE_MASK |
                  UVD_CGC_CTRL__MPEG2_MODE_MASK |
                  UVD_CGC_CTRL__REGS_MODE_MASK |
                  UVD_CGC_CTRL__RBC_MODE_MASK |
                  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                  UVD_CGC_CTRL__IDCT_MODE_MASK |
                  UVD_CGC_CTRL__MPRD_MODE_MASK |
                  UVD_CGC_CTRL__MPC_MODE_MASK |
                  UVD_CGC_CTRL__LBSI_MODE_MASK |
                  UVD_CGC_CTRL__LRBBM_MODE_MASK |
                  UVD_CGC_CTRL__WCB_MODE_MASK |
                  UVD_CGC_CTRL__VCPU_MODE_MASK |
                  UVD_CGC_CTRL__JPEG_MODE_MASK |
                  UVD_CGC_CTRL__SCPU_MODE_MASK |
                  UVD_CGC_CTRL__JPEG2_MODE_MASK);
        data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

        WREG32(mmUVD_CGC_CTRL, data);
        WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, cgc_flags, suvd_flags;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);

        cgc_flags = UVD_CGC_GATE__SYS_MASK |
                    UVD_CGC_GATE__UDEC_MASK |
                    UVD_CGC_GATE__MPEG2_MASK |
                    UVD_CGC_GATE__RBC_MASK |
                    UVD_CGC_GATE__LMI_MC_MASK |
                    UVD_CGC_GATE__IDCT_MASK |
                    UVD_CGC_GATE__MPRD_MASK |
                    UVD_CGC_GATE__MPC_MASK |
                    UVD_CGC_GATE__LBSI_MASK |
                    UVD_CGC_GATE__LRBBM_MASK |
                    UVD_CGC_GATE__UDEC_RE_MASK |
                    UVD_CGC_GATE__UDEC_CM_MASK |
                    UVD_CGC_GATE__UDEC_IT_MASK |
                    UVD_CGC_GATE__UDEC_DB_MASK |
                    UVD_CGC_GATE__UDEC_MP_MASK |
                    UVD_CGC_GATE__WCB_MASK |
                    UVD_CGC_GATE__VCPU_MASK |
                    UVD_CGC_GATE__SCPU_MASK |
                    UVD_CGC_GATE__JPEG_MASK |
                    UVD_CGC_GATE__JPEG2_MASK;

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                     UVD_SUVD_CGC_GATE__SIT_MASK |
                     UVD_SUVD_CGC_GATE__SMP_MASK |
                     UVD_SUVD_CGC_GATE__SCM_MASK |
                     UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= cgc_flags;
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable)
{
        u32 orig, data;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data |= 0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        } else {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data &= ~0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        }
}

static int uvd_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = ip_block->adev;
        bool enable = (state == AMD_CG_STATE_GATE);

        if (enable) {
                /* wait for STATUS to clear */
                if (uvd_v6_0_wait_for_idle(ip_block))
                        return -EBUSY;
                uvd_v6_0_enable_clock_gating(adev, true);
                /* enable HW gates because UVD is idle */
/*              uvd_v6_0_set_hw_clock_gating(adev); */
        } else {
                /* disable HW gating and enable SW gating */
                uvd_v6_0_enable_clock_gating(adev, false);
        }
        uvd_v6_0_set_sw_clock_gating(adev);
        return 0;
}

static int uvd_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC. This
         * just re-inits the block as necessary. The actual
         * gating still happens in the dpm code. We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = ip_block->adev;
        int ret = 0;

        WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

        if (state == AMD_PG_STATE_GATE) {
                uvd_v6_0_stop(adev);
        } else {
                ret = uvd_v6_0_start(adev);
                if (ret)
                        goto out;
        }

out:
        return ret;
}

static void uvd_v6_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
        struct amdgpu_device *adev = ip_block->adev;
        int data;

        mutex_lock(&adev->pm.mutex);

        if (adev->flags & AMD_IS_APU)
                data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
        else
                data = RREG32_SMC(ixCURRENT_PG_STATUS);

        if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
                DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
                goto out;
        }

        /* AMD_CG_SUPPORT_UVD_MGCG */
        data = RREG32(mmUVD_CGC_CTRL);
        if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
                *flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
        mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
        .name = "uvd_v6_0",
        .early_init = uvd_v6_0_early_init,
        .sw_init = uvd_v6_0_sw_init,
        .sw_fini = uvd_v6_0_sw_fini,
        .hw_init = uvd_v6_0_hw_init,
        .hw_fini = uvd_v6_0_hw_fini,
        .prepare_suspend = uvd_v6_0_prepare_suspend,
        .suspend = uvd_v6_0_suspend,
        .resume = uvd_v6_0_resume,
        .is_idle = uvd_v6_0_is_idle,
        .wait_for_idle = uvd_v6_0_wait_for_idle,
        .check_soft_reset = uvd_v6_0_check_soft_reset,
        .pre_soft_reset = uvd_v6_0_pre_soft_reset,
        .soft_reset = uvd_v6_0_soft_reset,
        .post_soft_reset = uvd_v6_0_post_soft_reset,
        .set_clockgating_state = uvd_v6_0_set_clockgating_state,
        .set_powergating_state = uvd_v6_0_set_powergating_state,
        .get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
        .no_user_fence = true,
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_frame_size =
                6 + /* hdp invalidate */
                10 + /* uvd_v6_0_ring_emit_pipeline_sync */
                14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
        .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = uvd_v6_0_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
        .emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
        .no_user_fence = true,
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .emit_frame_size =
                6 + /* hdp invalidate */
                10 + /* uvd_v6_0_ring_emit_pipeline_sync */
                VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
                14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
        .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
        .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = uvd_v6_0_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
        .emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_UVD_ENC,
        .align_mask = 0x3f,
        .nop = HEVC_ENC_CMD_NO_OP,
        .support_64bit_ptrs = false,
        .no_user_fence = true,
        .get_rptr = uvd_v6_0_enc_ring_get_rptr,
        .get_wptr = uvd_v6_0_enc_ring_get_wptr,
        .set_wptr = uvd_v6_0_enc_ring_set_wptr,
        .emit_frame_size =
                4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
                5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
                5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
                1, /* uvd_v6_0_enc_ring_insert_end */
        .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
        .emit_ib = uvd_v6_0_enc_ring_emit_ib,
        .emit_fence = uvd_v6_0_enc_ring_emit_fence,
        .emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
        .emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
        .test_ring = uvd_v6_0_enc_ring_test_ring,
        .test_ib = uvd_v6_0_enc_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .insert_end = uvd_v6_0_enc_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
        if (adev->asic_type >= CHIP_POLARIS10) {
                adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
                DRM_INFO("UVD is enabled in VM mode\n");
        } else {
                adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
                DRM_INFO("UVD is enabled in physical mode\n");
        }
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->uvd.num_enc_rings; ++i)
                adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

        DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
        .set = uvd_v6_0_set_interrupt_state,
        .process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
        if (uvd_v6_0_enc_support(adev))
                adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
        else
                adev->uvd.inst->irq.num_types = 1;

        adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 6,
        .minor = 0,
        .rev = 0,
        .funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 6,
        .minor = 2,
        .rev = 0,
        .funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 6,
        .minor = 3,
        .rev = 0,
        .funcs = &uvd_v6_0_ip_funcs,
};