/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "vcn_v5_0_0.h"

#include <drm/drm_drv.h>

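/* registers captured when dumping VCN IP state for debugging,
 * registered via amdgpu_vcn_reg_dump_init() in sw_init
 */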
static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
};

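/* IH client IDs, indexed by VCN instance */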
static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				   enum amd_powergating_state state);
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     struct dpg_pause_state *new_state);
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v5_0_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		/* re-use enc ring as unified ring */
		adev->vcn.inst[i].num_enc_rings = 1;

	vcn_v5_0_0_set_unified_ring_funcs(adev);
	vcn_v5_0_0_set_irq_funcs(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].set_pg_state = vcn_v5_0_0_set_pg_state;

		r = amdgpu_vcn_early_init(adev, i);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v5_0_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		struct amdgpu_vcn5_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_vcn_sw_init(adev, i);
		if (r)
			return r;

		amdgpu_vcn_setup_ucode(adev, i);

		r = amdgpu_vcn_resume(adev, i);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		/* VCN UNIFIED TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
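		/* each instance gets an 8-doorbell range starting at
		 * (vcn_ring0_1 << 1) + 8 * i (programmed in hw_init);
		 * the unified queue uses offset 2 within that range
		 */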
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;

		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_unified_%d", i);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				     AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = 1;

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.inst[i].pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
	}

	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
	if (!amdgpu_sriov_vf(adev))
		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

	r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0, ARRAY_SIZE(vcn_reg_list_5_0));
	if (r)
		return r;

	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v5_0_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			struct amdgpu_vcn5_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;
	}

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		amdgpu_vcn_sw_fini(adev, i);

	return 0;
}

/**
 * vcn_v5_0_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ring = &adev->vcn.inst[i].ring_enc[0];

		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
			((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v5_0_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		cancel_delayed_work_sync(&vinst->idle_work);

		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (vinst->cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, regUVD_STATUS))) {
				vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v5_0_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	r = vcn_v5_0_0_hw_fini(ip_block);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(ip_block->adev, i);
		if (r)
			return r;
	}

	return r;
}

/**
 * vcn_v5_0_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_resume(ip_block->adev, i);
		if (r)
			return r;
	}

	r = vcn_v5_0_0_hw_init(ip_block);

	return r;
}

/**
 * vcn_v5_0_0_mc_resume - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v5_0_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
}

/**
 * vcn_v5_0_0_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
					  bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
		adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v5_0_0_disable_static_power_gating - disable VCN static power gating
 *
 * @vinst: VCN instance
 *
 * Disable static power gating for VCN block
 */
static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				   UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				   1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
				   UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				   1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
				   UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				   1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
				   UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	} else {
		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				   UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				   UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				   UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
				   UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	}

	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
}

/**
 * vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating
 *
 * @vinst: VCN instance
 *
 * Enable static power gating for VCN block
 */
static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				   1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
				   UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				   1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
				   UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				   1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
				   UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
				   1 << UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT,
				   UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
	}
}

/**
 * vcn_v5_0_0_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Disable clock gating for VCN block
 */
static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
}

#if 0
/**
 * vcn_v5_0_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @vinst: VCN instance
 * @sram_sel: sram select
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
						     uint8_t sram_sel,
						     uint8_t indirect)
{
}
#endif

/**
 * vcn_v5_0_0_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Enable clock gating for VCN block
 */
static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
}

/**
 * vcn_v5_0_0_start_dpg_mode - VCN start with dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int ret;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);

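	/* in indirect mode the DPG register writes below are staged in the
	 * DPG SRAM buffer and committed through PSP at the end of this function
	 */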
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);

	vcn_v5_0_0_mc_resume_dpg_mode(vinst, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect) {
		ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
		if (ret) {
			dev_err(adev->dev, "%s: vcn sram load failed %d\n", __func__, ret);
			return ret;
		}
	}

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);

	return 0;
}

/**
 * vcn_v5_0_0_start - VCN start
 *
 * @vinst: VCN instance
 *
 * Start VCN block
 */
static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	struct amdgpu_vcn5_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int j, k, r;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, true, i);

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v5_0_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);

	/* disable VCN power gating */
	vcn_v5_0_0_disable_static_power_gating(vinst);

	/* set VCN status busy */
	tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
		UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

	/* setup regUVD_LMI_CTRL */
	tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
	WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	vcn_v5_0_0_mc_resume(vinst);

	/* VCN global tiling registers */
	WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);

	/* unblock VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
		~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
		~UVD_VCPU_CNTL__BLK_RST_MASK);

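	/* poll UVD_STATUS until the VCPU reports it has booted (status & 2);
	 * on failure, pulse the VCPU block reset and retry
	 */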
	for (j = 0; j < 10; ++j) {
		uint32_t status;

		for (k = 0; k < 100; ++k) {
			status = RREG32_SOC15(VCN, i, regUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
			if (amdgpu_emu_mode == 1)
				msleep(1);
		}

		if (amdgpu_emu_mode == 1) {
			r = -1;
			if (status & 2) {
				r = 0;
				break;
			}
		} else {
			r = 0;
			if (status & 2)
				break;

			dev_err(adev->dev,
				"VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
				 UVD_VCPU_CNTL__BLK_RST_MASK,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}
	}

	if (r) {
		dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	ring = &adev->vcn.inst[i].ring_enc[0];
	WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);

	WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, i, regUVD_STATUS);

	return 0;
}

/**
 * vcn_v5_0_0_stop_dpg_mode - VCN stop with dpg mode
 *
 * @vinst: VCN instance
 *
 * Stop VCN block with dpg mode
 */
static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

	vcn_v5_0_0_pause_dpg_mode(vinst, &state);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
}

/**
 * vcn_v5_0_0_stop - VCN stop
 *
 * @vinst: VCN instance
 *
 * Stop VCN block
 */
static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	struct amdgpu_vcn5_fw_shared *fw_shared;
	uint32_t tmp;
	int r = 0;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
	fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		vcn_v5_0_0_stop_dpg_mode(vinst);
		r = 0;
		goto done;
	}

	/* wait for vcn idle */
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		goto done;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
	      UVD_LMI_STATUS__READ_CLEAN_MASK |
	      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
	      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* disable LMI UMC channel */
	tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
	      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* block VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
		 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__BLK_RST_MASK,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* apply soft reset */
	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

	/* clear status */
	WREG32_SOC15(VCN, i, regUVD_STATUS, 0);

	/* enable VCN power gating */
	vcn_v5_0_0_enable_static_power_gating(vinst);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, i, regUVD_STATUS);

done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, i);

	return r;
}

/**
 * vcn_v5_0_0_pause_dpg_mode - VCN pause with dpg mode
 *
 * @vinst: VCN instance
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				     struct dpg_pause_state *new_state)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v5_0_0_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v5_0_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
}

/**
 * vcn_v5_0_0_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v5_0_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
}

/**
 * vcn_v5_0_0_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

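/**
 * vcn_v5_0_0_ring_reset - reset the ring after a job timeout
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid of the timed out job
 * @timedout_fence: fence of the timed out job
 *
 * Stop and restart the VCN instance to recover the unified ring
 */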
static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring,
				 unsigned int vmid,
				 struct amdgpu_fence *timedout_fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
	int r;

	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
	r = vcn_v5_0_0_stop(vinst);
	if (r)
		return r;
	r = vcn_v5_0_0_start(vinst);
	if (r)
		return r;
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}

static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v5_0_0_unified_ring_get_rptr,
	.get_wptr = vcn_v5_0_0_unified_ring_get_wptr,
	.set_wptr = vcn_v5_0_0_unified_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = vcn_v5_0_0_ring_reset,
};

/**
 * vcn_v5_0_0_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_0_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
	}
}

/**
 * vcn_v5_0_0_is_idle - check VCN block is idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block structure
 *
 * Check whether VCN block is idle
 */
static bool vcn_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

/**
 * vcn_v5_0_0_wait_for_idle - wait for VCN block idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Wait for VCN block idle
 */
static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state
 *
 * @ip_block: amdgpu_ip_block pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v5_0_0_enable_clock_gating(vinst);
		} else {
			vcn_v5_0_0_disable_clock_gating(vinst);
		}
	}

	return 0;
}

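/**
 * vcn_v5_0_0_set_pg_state - set VCN block powergating state
 *
 * @vinst: VCN instance
 * @state: powergating state
 *
 * Gate or ungate VCN power by stopping or starting the block
 */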
static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				   enum amd_powergating_state state)
{
	int ret = 0;

	if (state == vinst->cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v5_0_0_stop(vinst);
	else
		ret = vcn_v5_0_0_start(vinst);

	if (!ret)
		vinst->cur_state = state;

	return ret;
}

/**
 * vcn_v5_0_0_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v5_0_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
	struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_5_0__SRCID_UVD_POISON:
		amdgpu_vcn_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v5_0_0_irq_funcs = {
	.process = vcn_v5_0_0_process_interrupt,
};

/**
 * vcn_v5_0_0_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v5_0_0_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
	.name = "vcn_v5_0_0",
	.early_init = vcn_v5_0_0_early_init,
	.sw_init = vcn_v5_0_0_sw_init,
	.sw_fini = vcn_v5_0_0_sw_fini,
	.hw_init = vcn_v5_0_0_hw_init,
	.hw_fini = vcn_v5_0_0_hw_fini,
	.suspend = vcn_v5_0_0_suspend,
	.resume = vcn_v5_0_0_resume,
	.is_idle = vcn_v5_0_0_is_idle,
	.wait_for_idle = vcn_v5_0_0_wait_for_idle,
	.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
	.set_powergating_state = vcn_set_powergating_state,
	.dump_ip_state = amdgpu_vcn_dump_ip_state,
	.print_ip_state = amdgpu_vcn_print_ip_state,
};

const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v5_0_0_ip_funcs,
};
