/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"
#include "vcn_v2_5.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0				0x48200
#define VCN1_AON_SOC_ADDRESS_3_0				0x48000

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define VCN25_MAX_HW_INSTANCES_ARCTURUS				2

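/* Register list used for debug dumps of the VCN 2.5 IP block */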
static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
};

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst);

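/* IH client ID for each VCN hardware instance */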
static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

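/*
 * Delayed-work handler: count the fences still outstanding on every ring of
 * every non-harvested instance, gate VCN power once everything is idle, and
 * otherwise re-arm the work (updating the DPG pause state where applicable).
 */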
static void vcn_v2_5_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_vcn_inst *vcn_inst =
		container_of(work, struct amdgpu_vcn_inst, idle_work.work);
	struct amdgpu_device *adev = vcn_inst->adev;
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *v = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		for (j = 0; j < v->num_enc_rings; ++j)
			fence[i] += amdgpu_fence_count_emitted(&v->ring_enc[j]);

		/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
		    !v->using_unified_queue) {
			struct dpg_pause_state new_state;

			if (fence[i] ||
			    unlikely(atomic_read(&v->dpg_enc_submission_cnt)))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			v->pause_dpg_mode(v, &new_state);
		}

		fence[i] += amdgpu_fence_count_emitted(&v->ring_dec);
		fences += fence[i];
	}

	if (!fences && !atomic_read(&adev->vcn.inst[0].total_submission_cnt)) {
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
						       AMD_PG_STATE_GATE);
		amdgpu_vcn_put_profile(adev);
	} else {
		schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
	}
}

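/*
 * Called before ring submission: ungate VCN power and, in DPG mode, decide
 * whether firmware should pause dynamic power gating for this workload.
 */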
static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vcn_inst *v = &adev->vcn.inst[ring->me];

	atomic_inc(&adev->vcn.inst[0].total_submission_cnt);

	cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);

	/* We can safely return early here because we've cancelled the
	 * delayed work so there is no one else to set it to false
	 * and we don't care if someone else sets it to true.
	 */
	mutex_lock(&adev->vcn.inst[0].vcn_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
					       AMD_PG_STATE_UNGATE);

	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    !v->using_unified_queue) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&v->dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < v->num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&v->ring_enc[i]);

			if (fences || atomic_read(&v->dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}
		v->pause_dpg_mode(v, &new_state);
	}
	mutex_unlock(&adev->vcn.inst[0].vcn_pg_lock);
	amdgpu_vcn_get_profile(adev);
}

static void vcn_v2_5_ring_end_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
	    !adev->vcn.inst[ring->me].using_unified_queue)
		atomic_dec(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&adev->vcn.inst[0].total_submission_cnt);

	schedule_delayed_work(&adev->vcn.inst[0].idle_work,
			      VCN_IDLE_TIMEOUT);
}

/**
 * vcn_v2_5_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
		adev->vcn.harvest_config = 0;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
			adev->vcn.inst[i].num_enc_rings = 1;
	} else {
		u32 harvest;
		int i;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
			adev->vcn.inst[i].num_enc_rings = 2;
		}
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	}

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);
	vcn_v2_5_set_ras_funcs(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		adev->vcn.inst[i].set_pg_state = vcn_v2_5_set_pg_state;

		r = amdgpu_vcn_early_init(adev, i);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = ip_block->adev;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << j))
			continue;
		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
					      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				      VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
		if (r)
			return r;

		r = amdgpu_vcn_sw_init(adev, j);
		if (r)
			return r;

		/* Override the work func */
		adev->vcn.inst[j].idle_work.work.func = vcn_v2_5_idle_work_handler;

		amdgpu_vcn_setup_ucode(adev, j);

		r = amdgpu_vcn_resume(adev, j);
		if (r)
			return r;

		adev->vcn.inst[j].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.inst[j].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.inst[j].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.inst[j].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.inst[j].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.inst[j].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.inst[j].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
		adev->vcn.inst[j].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.inst[j].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.inst[j].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.inst[j].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);

		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;

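		/* Under SR-IOV each instance uses two doorbell slots (dec plus one
		 * enc ring); on bare metal instances are spaced eight slots apart
		 * (dec, two enc rings and unused spares).
		 */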
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);

		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(2, 5, 0))
			ring->vm_hub = AMDGPU_MMHUB1(0);
		else
			ring->vm_hub = AMDGPU_MMHUB0(0);

		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;

			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));

			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
			    IP_VERSION(2, 5, 0))
				ring->vm_hub = AMDGPU_MMHUB1(0);
			else
				ring->vm_hub = AMDGPU_MMHUB0(0);

			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->vcn.inst[j].irq, 0,
					     hw_prio, NULL);
			if (r)
				return r;
		}

		fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[j]);

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
		adev->vcn.inst[j].reset = vcn_v2_5_reset;
	}

	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
	if (!amdgpu_sriov_vf(adev))
		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	r = amdgpu_vcn_ras_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_5, ARRAY_SIZE(vcn_reg_list_2_5));
	if (r)
		return r;

	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i, r, idx;
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_fw_shared *fw_shared;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
		}
		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;
		amdgpu_vcn_sw_fini(adev, i);
	}

	return 0;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r = 0;

	if (amdgpu_sriov_vf(adev))
		r = vcn_v2_5_sriov_start(adev);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (amdgpu_sriov_vf(adev)) {
			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
			adev->vcn.inst[j].ring_dec.sched.ready = true;
		} else {

			ring = &adev->vcn.inst[j].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
							     ring->doorbell_index, j);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;

			for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
				ring = &adev->vcn.inst[j].ring_enc[i];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					return r;
			}
		}
	}

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		cancel_delayed_work_sync(&vinst->idle_work);

		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		    (vinst->cur_state != AMD_PG_STATE_GATE &&
		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
			vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);

		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
			amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0);
	}

	return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	r = vcn_v2_5_hw_fini(ip_block);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(ip_block->adev, i);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_resume(ip_block->adev, i);
		if (r)
			return r;
	}

	r = vcn_v2_5_hw_init(ip_block);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	uint32_t size;
	uint32_t offset;

	if (adev->vcn.harvest_config & (1 << i))
		return;

	size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst[i].gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst[i].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
	WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
		     AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}

static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
					bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	uint32_t data;

	if (adev->vcn.harvest_config & (1 << i))
		return;
	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

	SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

	/* turn on SUVD clock gating */
	data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
					   uint8_t sram_sel, uint8_t indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		      UVD_CGC_CTRL__SYS_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_MODE_MASK |
		      UVD_CGC_CTRL__MPEG2_MODE_MASK |
		      UVD_CGC_CTRL__REGS_MODE_MASK |
		      UVD_CGC_CTRL__RBC_MODE_MASK |
		      UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		      UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		      UVD_CGC_CTRL__IDCT_MODE_MASK |
		      UVD_CGC_CTRL__MPRD_MODE_MASK |
		      UVD_CGC_CTRL__MPC_MODE_MASK |
		      UVD_CGC_CTRL__LBSI_MODE_MASK |
		      UVD_CGC_CTRL__LRBBM_MODE_MASK |
		      UVD_CGC_CTRL__WCB_MODE_MASK |
		      UVD_CGC_CTRL__VCPU_MODE_MASK |
		      UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	uint32_t data = 0;

	if (adev->vcn.harvest_config & (1 << i))
		return;
	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
}

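/* RAS interrupt routing only exists on VCN 2.6; earlier revisions bail out */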
static void vcn_v2_6_enable_ras(struct amdgpu_vcn_inst *vinst,
				bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t tmp;

	if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0))
		return;

	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmVCN_RAS_CNTL),
			      tmp, 0, indirect);

	tmp = UVD_VCPU_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_VCPU_INT_EN),
			      tmp, 0, indirect);

	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_SYS_INT_EN),
			      tmp, 0, indirect);
}

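/*
 * Start one instance in dynamic power gating (DPG) mode. When 'indirect' is
 * set, the register writes are staged into the instance's DPG scratch SRAM
 * and applied through the PSP rather than written directly.
 */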
static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int ret;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_5_clock_gating_dpg_mode(vinst, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
	       UVD_LMI_CTRL__REQ_MODE_MASK |
	       UVD_LMI_CTRL__CRC_RESET_MASK |
	       UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
	       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
	       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
	       (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
	       0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_5_mc_resume_dpg_mode(vinst, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);

	vcn_v2_6_enable_ras(vinst, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect) {
		ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
		if (ret) {
			dev_err(adev->dev, "vcn sram load failed %d\n", ret);
			return ret;
		}
	}

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		     (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);

	return 0;
}

static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	struct amdgpu_fw_shared *fw_shared =
		adev->vcn.inst[i].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int j, k, r;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, true, i);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v2_5_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* set uvd status busy */
	tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(vinst);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* setup mmUVD_LMI_CTRL */
	tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
	tmp &= ~0xff;
	WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8 |
		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup mmUVD_MPC_CNTL */
	tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
		     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
		     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup mmUVD_MPC_SET_MUX */
	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
		     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v2_5_mc_resume(vinst);

	/* VCN global tiling registers */
	WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	/* unblock VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

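	/* Boot the VCPU: poll UVD_STATUS for up to ~1s per attempt, retrying
	 * with a VCPU block reset up to 10 times before giving up.
	 */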
	for (k = 0; k < 10; ++k) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
			if (status & 2)
				break;
			if (amdgpu_emu_mode == 1)
				msleep(500);
			else
				mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			 UVD_VCPU_CNTL__BLK_RST_MASK,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);

		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

	ring = &adev->vcn.inst[i].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));
	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

	fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
	ring = &adev->vcn.inst[i].ring_enc[0];
	WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
	fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

	fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
	ring = &adev->vcn.inst[i].ring_enc[1];
	WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
	fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, i, mmUVD_STATUS);

	return 0;
}

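/*
 * Hand the host-built descriptor table to the MMSCH (multi-media scheduler)
 * and poll its mailbox until the firmware acknowledges initialization.
 */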
static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop = 0, size = 0;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_1_init_header *header = NULL;

	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
	size = header->total_size;

	/*
	 * 1, write to mmMMSCH_VF_CTX_ADDR_LO/HI register with GPU mc addr of
	 * memory descriptor location
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 10;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(100);
		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev,
			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
			data);
		return -EBUSY;
	}

	return 0;
}

static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp, i, rb_bufsz;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	header->version = MMSCH_VERSION;
	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
	init_table += header->total_size;

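	/* Build one register-programming table per VCN engine; each table is
	 * appended after the header and terminated with an END packet.
	 */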
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		header->eng[i].table_offset = header->total_size;
		header->eng[i].init_status = 0;
		header->eng[i].table_size = 0;

		table_size = 0;

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
		/* mc resume*/
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
			size);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->wptr = 0;

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
			upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
			ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;

		/* refine header */
		header->eng[i].table_size = table_size;
		header->total_size += table_size;
	}

	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}

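/*
 * Quiesce an instance that is running in DPG mode: wait for the rings to
 * drain and the power state to settle, then switch dynamic power gating off.
 */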
static int vcn_v2_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);

	return 0;
}

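/*
 * Stop an instance. In DPG mode this reduces to stop_dpg_mode();
 * otherwise: wait for the engine and its LMI to drain, stall the UMC
 * channel, block VCPU register access, hold the VCPU in reset with its
 * clock off, re-enable clock gating, and re-arm the register anti-hang
 * mechanism.
 */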
static int vcn_v2_5_stop(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	uint32_t tmp;
	int r;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_5_stop_dpg_mode(vinst);
		goto done;
	}

	/* wait for vcn idle */
	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		goto done;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* block LMI UMC channel */
	tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* block VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
		UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
		~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
		UVD_VCPU_CNTL__BLK_RST_MASK,
		~UVD_VCPU_CNTL__BLK_RST_MASK);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
		~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* clear status */
	WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

	vcn_v2_5_enable_clock_gating(vinst);

	/* enable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, i, mmUVD_STATUS);
done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, i);

	return r;
}

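/*
 * Pause/unpause the DPG state machine. The pause side of the handshake
 * is, in outline:
 *
 *	write NJ_PAUSE_DPG_REQ into UVD_DPG_PAUSE;
 *	poll UVD_DPG_PAUSE until NJ_PAUSE_DPG_ACK is set;
 *
 * With firmware paused and DPG power-up stalled, both encode rings are
 * re-initialized under the FW_QUEUE_RING_RESET flag so firmware picks
 * up the new ring state, then DPG is unstalled again.
 */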
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code = 0;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;

				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

1726/**
1727 * vcn_v2_5_dec_ring_get_rptr - get read pointer
1728 *
1729 * @ring: amdgpu_ring pointer
1730 *
1731 * Returns the current hardware read pointer
1732 */
1733static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
1734{
1735 struct amdgpu_device *adev = ring->adev;
1736
1737 return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
1738}
1739
1740/**
1741 * vcn_v2_5_dec_ring_get_wptr - get write pointer
1742 *
1743 * @ring: amdgpu_ring pointer
1744 *
1745 * Returns the current hardware write pointer
1746 */
1747static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
1748{
1749 struct amdgpu_device *adev = ring->adev;
1750
1751 if (ring->use_doorbell)
1752 return *ring->wptr_cpu_addr;
1753 else
1754 return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
1755}
1756
1757/**
1758 * vcn_v2_5_dec_ring_set_wptr - set write pointer
1759 *
1760 * @ring: amdgpu_ring pointer
1761 *
1762 * Commits the write pointer to the hardware
1763 */
1764static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
1765{
1766 struct amdgpu_device *adev = ring->adev;
1767
1768 if (ring->use_doorbell) {
1769 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1770 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1771 } else {
1772 WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1773 }
1774}
1775
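/*
 * In the ring-funcs tables below, emit_frame_size is the worst-case
 * dword count a single submission can add to the ring (TLB flush, VM
 * flush, two fences, trailing padding) and emit_ib_size the per-IB
 * overhead; the ring core uses both to reserve space before a job is
 * emitted.
 */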
static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.secure_submission_supported = true,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v2_5_ring_begin_use,
	.end_use = vcn_v2_5_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = amdgpu_vcn_ring_reset,
};

1807/**
1808 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
1809 *
1810 * @ring: amdgpu_ring pointer
1811 *
1812 * Returns the current hardware enc read pointer
1813 */
1814static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
1815{
1816 struct amdgpu_device *adev = ring->adev;
1817
1818 if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
1819 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
1820 else
1821 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
1822}
1823
1824/**
1825 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
1826 *
1827 * @ring: amdgpu_ring pointer
1828 *
1829 * Returns the current hardware enc write pointer
1830 */
1831static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
1832{
1833 struct amdgpu_device *adev = ring->adev;
1834
1835 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1836 if (ring->use_doorbell)
1837 return *ring->wptr_cpu_addr;
1838 else
1839 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
1840 } else {
1841 if (ring->use_doorbell)
1842 return *ring->wptr_cpu_addr;
1843 else
1844 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
1845 }
1846}
1847
1848/**
1849 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
1850 *
1851 * @ring: amdgpu_ring pointer
1852 *
1853 * Commits the enc write pointer to the hardware
1854 */
1855static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
1856{
1857 struct amdgpu_device *adev = ring->adev;
1858
1859 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1860 if (ring->use_doorbell) {
1861 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1862 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1863 } else {
1864 WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1865 }
1866 } else {
1867 if (ring->use_doorbell) {
1868 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1869 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1870 } else {
1871 WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1872 }
1873 }
1874}
1875
static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v2_5_ring_begin_use,
	.end_use = vcn_v2_5_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = amdgpu_vcn_ring_reset,
};

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
	}
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
	}
}

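/* Per-instance engine recovery: a full stop/start cycle of the instance. */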
static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst)
{
	int r;

	r = vcn_v2_5_stop(vinst);
	if (r)
		return r;
	return vcn_v2_5_start(vinst);
}

static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

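/*
 * Clock gating can only be enabled once every powered-on instance is
 * idle; under SR-IOV the host side owns gating, so the guest returns
 * early without touching it.
 */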
static int vcn_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	if (amdgpu_sriov_vf(adev))
		return 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (enable) {
			if (!vcn_v2_5_is_idle(ip_block))
				return -EBUSY;
			vcn_v2_5_enable_clock_gating(vinst);
		} else {
			vcn_v2_5_disable_clock_gating(vinst);
		}
	}

	return 0;
}

static int vcn_v2_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state)
{
	struct amdgpu_device *adev = vinst->adev;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (state == vinst->cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(vinst);
	else
		ret = vcn_v2_5_start(vinst);

	if (!ret)
		vinst->cur_state = state;

	return ret;
}

static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *source,
					    unsigned int type,
					    enum amdgpu_interrupt_state state)
{
	return 0;
}

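/*
 * IH dispatch: the client id selects the VCN instance and the source
 * id selects which ring's fences to process (decode, or one of the two
 * encode rings).
 */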
static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = {
	.set = vcn_v2_6_set_ras_interrupt_state,
	.process = amdgpu_vcn_process_poison_irq,
};

static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;

		adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
		adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_set_powergating_state,
	.dump_ip_state = amdgpu_vcn_dump_ip_state,
	.print_ip_state = amdgpu_vcn_print_ip_state,
};

static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
	.name = "vcn_v2_6",
	.early_init = vcn_v2_5_early_init,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_set_powergating_state,
	.dump_ip_state = amdgpu_vcn_dump_ip_state,
	.print_ip_state = amdgpu_vcn_print_ip_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &vcn_v2_5_ip_funcs,
};

const struct amdgpu_ip_block_version vcn_v2_6_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 6,
	.rev = 0,
	.funcs = &vcn_v2_6_ip_funcs,
};

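/*
 * RAS poison query for VCN 2.6: read the per-sub-block status register
 * and report whether a poisoned page fault (POISONED_PF) was latched.
 */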
static uint32_t vcn_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
			uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_VCN_V2_6_VCPU_VCODEC:
		reg_value = RREG32_SOC15(VCN, instance, mmUVD_RAS_VCPU_VCODEC_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
			instance, sub_block);

	return poison_stat;
}

static bool vcn_v2_6_query_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst, sub;
	uint32_t poison_stat = 0;

	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
		for (sub = 0; sub < AMDGPU_VCN_V2_6_MAX_SUB_BLOCK; sub++)
			poison_stat +=
				vcn_v2_6_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
	.query_poison_status = vcn_v2_6_query_poison_status,
};

static struct amdgpu_vcn_ras vcn_v2_6_ras = {
	.ras_block = {
		.hw_ops = &vcn_v2_6_ras_hw_ops,
		.ras_late_init = amdgpu_vcn_ras_late_init,
	},
};

static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(2, 6, 0):
		adev->vcn.ras = &vcn_v2_6_ras;
		break;
	default:
		break;
	}
}