/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_cs.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
#include "jpeg_v1_0.h"
#include "vcn_v1_0.h"

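/* VCN 1.0 specific register offsets, defined locally */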
#define mmUVD_RBC_XX_IB_REG_CHECK_1_0		0x05ab
#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX	1
#define mmUVD_REG_XX_MASK_1_0			0x05ac
#define mmUVD_REG_XX_MASK_1_0_BASE_IDX		1

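/* registers captured into the VCN IP dump buffer for debugging
 * (see vcn_v1_0_sw_init)
 */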
static const struct amdgpu_hwip_reg_entry vcn_reg_list_1_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
};

static int vcn_v1_0_stop(struct amdgpu_vcn_inst *vinst);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v1_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state);

static void vcn_v1_0_idle_work_handler(struct work_struct *work);
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);

/**
 * vcn_v1_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v1_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->vcn.inst[0].num_enc_rings = 2;
	adev->vcn.inst[0].set_pg_state = vcn_v1_0_set_pg_state;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_enc_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	jpeg_v1_0_early_init(ip_block);

	return amdgpu_vcn_early_init(adev, 0);
}

/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	int i, r;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
	uint32_t *ptr;
	struct amdgpu_device *adev = ip_block->adev;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
				      &adev->vcn.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev, 0);
	if (r)
		return r;

	/* Override the work func */
	adev->vcn.inst[0].idle_work.work.func = vcn_v1_0_idle_work_handler;

	amdgpu_vcn_setup_ucode(adev, 0);

	r = amdgpu_vcn_resume(adev, 0);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;
	ring->vm_hub = AMDGPU_MMHUB0(0);
	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->vcn.inst[0].internal.scratch9 = adev->vcn.inst->external.scratch9 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.inst[0].internal.data0 = adev->vcn.inst->external.data0 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.inst[0].internal.data1 = adev->vcn.inst->external.data1 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.inst[0].internal.cmd = adev->vcn.inst->external.cmd =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.inst[0].internal.nop = adev->vcn.inst->external.nop =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
		enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

		ring = &adev->vcn.inst->ring_enc[i];
		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     hw_prio, NULL);
		if (r)
			return r;
	}

	adev->vcn.inst[0].pause_dpg_mode = vcn_v1_0_pause_dpg_mode;

	if (amdgpu_vcnfw_log) {
		struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;

		fw_shared->present_flag_0 = 0;
		amdgpu_vcn_fwlog_init(adev->vcn.inst);
	}

	r = jpeg_v1_0_sw_init(ip_block);

	/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
		adev->vcn.ip_dump = NULL;
	} else {
		adev->vcn.ip_dump = ptr;
	}
	return r;
}

/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	r = amdgpu_vcn_suspend(adev, 0);
	if (r)
		return r;

	jpeg_v1_0_sw_fini(ip_block);

	amdgpu_vcn_sw_fini(adev, 0);

	kfree(adev->vcn.ip_dump);

	return 0;
}

/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	ring = adev->jpeg.inst->ring_dec;
	r = amdgpu_ring_test_helper(ring);

	return r;
}

/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_vcn_inst *vinst = adev->vcn.inst;

	cancel_delayed_work_sync(&vinst->idle_work);

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (vinst->cur_state != AMD_PG_STATE_GATE &&
	     RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
		vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
	}

	return 0;
}

/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;
	bool idle_work_unexecuted;

	idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
	if (idle_work_unexecuted) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_vcn(adev, false, 0);
	}

	r = vcn_v1_0_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev, 0);

	return r;
}

/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_vcn_resume(ip_block->adev, 0);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(ip_block);

	return r;
}

/**
 * vcn_v1_0_mc_resume_spg_mode - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
}

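/**
 * vcn_v1_0_mc_resume_dpg_mode - memory controller programming in DPG mode
 *
 * @vinst: VCN instance
 *
 * Same cache-window setup as the SPG path, but written through the DPG
 * indirect register-write macro.
 */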
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
			0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
			0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
			0xFFFFFFFF, 0);
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		offset = size;
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
	}

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);

	/* cache window 1: stack */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
		0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
		0xFFFFFFFF, 0);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
		0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
		0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
		0xFFFFFFFF, 0);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
}

/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t data;

	/* JPEG disable CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on SUVD clock gating */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t data = 0;

	/* enable JPEG CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

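/**
 * vcn_v1_0_clock_gating_dpg_mode - program clock gating in DPG mode
 *
 * @vinst: VCN instance
 * @sram_sel: SRAM select forwarded to the DPG indirect-write macro
 *
 * Program the JPEG/UVD/SUVD clock gating controls through the DPG
 * register-write path.
 */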
static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
					   uint8_t sram_sel)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t reg_data = 0;

	/* disable JPEG CGC */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
}

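/**
 * vcn_1_0_disable_static_power_gating - power the VCN tiles up
 *
 * @vinst: VCN instance
 *
 * Program UVD_PGFSM_CONFIG to power up the tiles and wait for
 * UVD_PGFSM_STATUS to report the expected power-on state.
 */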
static void vcn_1_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS, UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

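/**
 * vcn_1_0_enable_static_power_gating - power the VCN tiles down
 *
 * @vinst: VCN instance
 *
 * If static power gating is supported, flag the tiles as going off in
 * UVD_POWER_STATUS, then program UVD_PGFSM_CONFIG to power them down
 * and wait for UVD_PGFSM_STATUS to confirm.
 */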
static void vcn_1_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF);
	}
}

/**
 * vcn_v1_0_start_spg_mode - start VCN block
 *
 * @vinst: VCN instance
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start_spg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_disable_static_power_gating(vinst);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(vinst);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* initialize VCN memory controller */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v1_0_mc_resume_spg_mode(vinst);

	WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
		RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);

	/* enable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* boot up the VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);

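	/* poll UVD_STATUS for idle, up to ten attempts of ~1s each,
	 * resetting the VCPU between attempts
	 */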
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & UVD_STATUS__IDLE)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & UVD_STATUS__IDLE)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable system interrupt for JRBC, TODO: move to set interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
		 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
		 ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);

	/* clear the busy bit of UVD_STATUS */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		     (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
		 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	jpeg_v1_0_start(adev, 0);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(UVD, 0, mmUVD_STATUS);

	return 0;
}

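/**
 * vcn_v1_0_start_dpg_mode - start VCN block with dynamic power gating
 *
 * @vinst: VCN instance
 *
 * Setup and start the VCN block in dynamic power gating (DPG) mode
 */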
static int vcn_v1_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_enable_static_power_gating(vinst);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	/* enable clock gating */
	vcn_v1_0_clock_gating_dpg_mode(vinst, 0);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);

	/* disable interrupt */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
		0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	/* initialize VCN memory controller */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 0);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_CNTL,
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);

	vcn_v1_0_mc_resume_dpg_mode(vinst);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);

	/* boot up the VCPU */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);

	/* enable UMC */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL2,
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
		0xFFFFFFFF, 0);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
		UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	vcn_v1_0_clock_gating_dpg_mode(vinst, 1);
	/* setup mmUVD_LMI_CTRL */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 1);

	tmp = adev->gfx.config.gb_addr_config;
	/* setup VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);

	/* enable System Interrupt for JRBC */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SYS_INT_EN,
		UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		     (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
		 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	jpeg_v1_0_start(adev, 1);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(UVD, 0, mmUVD_STATUS);

	return 0;
}

static int vcn_v1_0_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;

	return (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ?
		vcn_v1_0_start_dpg_mode(vinst) : vcn_v1_0_start_spg_mode(vinst);
}

/**
 * vcn_v1_0_stop_spg_mode - stop VCN block
 *
 * @vinst: VCN instance
 *
 * stop the VCN block
 */
static int vcn_v1_0_stop_spg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int tmp;

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);

	/* stall UMC channel */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
		 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		 ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* reset LMI UMC/LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* put VCPU into reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);

	vcn_v1_0_enable_clock_gating(vinst);
	vcn_1_0_enable_static_power_gating(vinst);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(UVD, 0, mmUVD_STATUS);

	return 0;
}

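/**
 * vcn_v1_0_stop_dpg_mode - stop VCN block in dynamic power gating mode
 *
 * @vinst: VCN instance
 *
 * Wait for the rings to drain and the power state to settle, then
 * disable dynamic power gating mode
 */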
static int vcn_v1_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t tmp;

	/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(UVD, 0, mmUVD_STATUS);

	return 0;
}

static int vcn_v1_0_stop(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		r = vcn_v1_0_stop_dpg_mode(vinst);
	else
		r = vcn_v1_0_stop_spg_mode(vinst);

	return r;
}

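/**
 * vcn_v1_0_pause_dpg_mode - pause/unpause the DPG decode and JPEG paths
 *
 * @vinst: VCN instance
 * @new_state: requested pause state
 *
 * Pause or unpause the non-JPEG and JPEG parts of the block when the
 * requested state differs from the current one, restoring the ring
 * registers after a pause is acknowledged.
 */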
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	int ret_code;
	uint32_t reg_data = 0;
	uint32_t reg_data2 = 0;
	struct amdgpu_ring *ring;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			  adev->vcn.inst[inst_idx].pause_state.fw_based,
			  adev->vcn.inst[inst_idx].pause_state.jpeg,
			  new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
						UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG non-jpeg */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Restore */
				ring = &adev->vcn.inst->ring_enc[0];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_enc[1];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

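				/* SCRATCH2 mirrors the decode ring wptr (bit 31 is a
				 * flag set by vcn_v1_0_dec_ring_set_wptr); mask it off
				 * on restore
				 */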
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg non-jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			  adev->vcn.inst[inst_idx].pause_state.fw_based,
			  adev->vcn.inst[inst_idx].pause_state.jpeg,
			  new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

		if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
						UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* Make sure JPRG Snoop is disabled before sending the pause */
				reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
				reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);

				/* pause DPG jpeg */
				reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						   UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
						   UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

				/* Restore */
				ring = adev->jpeg.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
					     UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
					     UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
					     lower_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
					     upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
					     UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);

				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
	}

	return 0;
}

static bool vcn_v1_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

static int vcn_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;

	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
				 UVD_STATUS__IDLE);

	return ret;
}

static int vcn_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_vcn_inst *vinst = adev->vcn.inst;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v1_0_is_idle(ip_block))
			return -EBUSY;
		vcn_v1_0_enable_clock_gating(vinst);
	} else {
		/* disable HW gating and enable SW gating */
		vcn_v1_0_disable_clock_gating(vinst);
	}
	return 0;
}

/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

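	/* in DPG mode, mirror the wptr into SCRATCH2 (with bit 31 set) so it
	 * can be restored after a DPG pause, see vcn_v1_0_pause_dpg_mode
	 */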
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			     lower_32_bits(ring->wptr) | 0x80000000);

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}

/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
					 unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}
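
/*
 * The dec ring carries PACKET0-style (register, value) pairs. As an
 * illustration, a fence at addr 0x1000 with seq 42 emitted by
 * vcn_v1_0_dec_ring_emit_fence() above becomes this dword stream
 * (register names abbreviated):
 *
 *	PACKET0(CONTEXT_ID),       42
 *	PACKET0(GPCOM_VCPU_DATA0), 0x00001000
 *	PACKET0(GPCOM_VCPU_DATA1), 0x00000000
 *	PACKET0(GPCOM_VCPU_CMD),   VCN_DEC_CMD_FENCE << 1
 *	PACKET0(GPCOM_VCPU_DATA0), 0
 *	PACKET0(GPCOM_VCPU_DATA1), 0
 *	PACKET0(GPCOM_VCPU_CMD),   VCN_DEC_CMD_TRAP << 1
 *
 * 14 dwords in total, which is why emit_frame_size below accounts
 * "14 + 14" for the two fences of a VM submission.
 */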

/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
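
/*
 * vcn_v1_0_dec_ring_emit_ib() above is four register writes of two dwords
 * each (VMID, IB address low/high, IB size), i.e. 8 dwords per IB, which
 * matches .emit_ib_size = 8 in vcn_v1_0_dec_ring_vm_funcs below.
 */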

static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}
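
/*
 * A conditional register wait on the dec ring is built from the same
 * pairs: DATA0 carries the register byte address (dword offset << 2),
 * DATA1 the expected value, GP_SCRATCH8 the compare mask, and the final
 * CMD write arms VCN_DEC_CMD_REG_READ_COND_WAIT. That is 8 dwords,
 * matching the "* 8" factor on SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT in
 * emit_frame_size below.
 */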

static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}

/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}

/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}
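
/*
 * Unlike the dec ring, the enc ring takes raw command dwords with no
 * PACKET0 headers: the fence above is simply
 *
 *	VCN_ENC_CMD_FENCE, addr_lo, addr_hi, seq, VCN_ENC_CMD_TRAP
 *
 * i.e. 5 dwords, hence the "5 + 5" fence term in
 * vcn_v1_0_enc_ring_vm_funcs below.
 */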

static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case 119:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case 120:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}
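
/*
 * vcn_v1_0_dec_ring_insert_nop() above can only pad in units of two
 * dwords, since each NOP is a (PACKET0(UVD_NO_OP), 0) register write
 * pair; the WARN_ON catches odd counts and odd write pointers.
 */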

static int vcn_v1_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	int ret;

	if (state == vinst->cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v1_0_stop(vinst);
	else
		ret = vcn_v1_0_start(vinst);

	if (!ret)
		vinst->cur_state = state;

	return ret;
}

static void vcn_v1_0_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_vcn_inst *vcn_inst =
		container_of(work, struct amdgpu_vcn_inst, idle_work.work);
	struct amdgpu_device *adev = vcn_inst->adev;
	unsigned int fences = 0, i;

	for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
		fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.inst->pause_dpg_mode(vcn_inst, &new_state);
	}

	fences += amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec);
	fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_vcn(adev, false, 0);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
	}
}
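
/*
 * vcn_v1_0_idle_work_handler() above powers the block down only once no
 * fences remain on the dec, enc or JPEG rings (via DPM when it is
 * enabled, otherwise through the powergating IP callback); while work is
 * still outstanding it simply re-arms itself after VCN_IDLE_TIMEOUT.
 */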

static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);

	mutex_lock(&adev->vcn.inst[0].vcn1_jpeg1_workaround);

	if (amdgpu_fence_wait_empty(ring->adev->jpeg.inst->ring_dec))
		DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");

	vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
}

void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
{
	struct amdgpu_device *adev = ring->adev;

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_vcn(adev, true, 0);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0, i;

		for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
			fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.inst->pause_dpg_mode(adev->vcn.inst, &new_state);
	}
}
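
/*
 * In vcn_v1_0_set_pg_for_begin_use() the DPG pause state is derived from
 * outstanding fences first and then overridden for the ring that is
 * about to submit: enc rings force fw_based to PAUSE and JPEG rings
 * force jpeg to PAUSE before pause_dpg_mode() is applied.
 */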

void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
	mutex_unlock(&ring->adev->vcn.inst[0].vcn1_jpeg1_workaround);
}

static void vcn_v1_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
	uint32_t inst_off, is_powered;

	if (!adev->vcn.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i)) {
			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * reg_count;
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered) {
			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_1_0[j].reg_name,
					   adev->vcn.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
		}
	}
}

static void vcn_v1_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	uint32_t inst_off;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);

	if (!adev->vcn.ip_dump)
		return;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_off = i * reg_count;
		/* mmUVD_POWER_STATUS is always readable and is the first element of the array */
		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered)
			for (j = 1; j < reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_1_0[j], i));
	}
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_set_powergating_state,
	.dump_ip_state = vcn_v1_0_dump_ip_state,
	.print_ip_state = vcn_v1_0_print_ip_state,
};

/*
 * It is a hardware issue that VCN can't handle a GTT TMZ buffer on
 * CHIP_RAVEN series ASIC. Move such a GTT TMZ buffer to VRAM domain
 * before command submission as a workaround.
 */
static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
				struct amdgpu_job *job,
				uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr / AMDGPU_GPU_PAGE_SIZE);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	bo = mapping->bo_va->base.bo;
	if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
		return 0;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to validate the VCN message BO (%d)!\n", r);
		return r;
	}

	return r;
}

static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	uint32_t msg_lo = 0, msg_hi = 0;
	int i, r;

	if (!(ib->flags & AMDGPU_IB_FLAGS_SECURE))
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);
		uint32_t val = amdgpu_ib_get_value(ib, i + 1);

		if (reg == PACKET0(p->adev->vcn.inst[0].internal.data0, 0)) {
			msg_lo = val;
		} else if (reg == PACKET0(p->adev->vcn.inst[0].internal.data1, 0)) {
			msg_hi = val;
		} else if (reg == PACKET0(p->adev->vcn.inst[0].internal.cmd, 0)) {
			r = vcn_v1_0_validate_bo(p, job,
						 ((u64)msg_hi) << 32 | msg_lo);
			if (r)
				return r;
		}
	}

	return 0;
}
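
/*
 * Secure dec IBs are a stream of (register, value) pairs, so the scan in
 * vcn_v1_0_ring_patch_cs_in_place() above walks two dwords at a time:
 * DATA0 and DATA1 writes are remembered as the low/high halves of the
 * message address, and a CMD write validates the buffer they point to.
 * A hypothetical fragment the loop would act on (values made up for
 * illustration):
 *
 *	PACKET0(data0), 0x00400000	-> msg_lo
 *	PACKET0(data1), 0x00000001	-> msg_hi
 *	PACKET0(cmd),   <cmd>		-> validate addr 0x100400000
 */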

static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.secure_submission_supported = true,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.patch_cs_in_place = vcn_v1_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v1_0_ring_begin_use,
	.end_use = vcn_v1_0_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
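
/*
 * The emit_frame_size terms above map onto the emit functions in this
 * file: each dec emit_wreg is three (register, value) pairs = 6 dwords,
 * each emit_reg_wait is four pairs = 8 dwords, the vm_flush tail adds
 * another reg_wait (8), and each fence-plus-trap sequence is 14 dwords,
 * emitted twice per VM submission.
 */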

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v1_0_ring_begin_use,
	.end_use = vcn_v1_0_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.inst[0].num_enc_rings + 2;
	adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v1_0_ip_funcs,
};