/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_vpe.h"
#include "vpe_v6_1.h"
#include "soc15_common.h"
#include "ivsrcid/vpe/irqsrcs_vpe_6_1.h"
#include "vpe/vpe_6_1_0_offset.h"
#include "vpe/vpe_6_1_0_sh_mask.h"

MODULE_FIRMWARE("amdgpu/vpe_6_1_0.bin");
MODULE_FIRMWARE("amdgpu/vpe_6_1_1.bin");
MODULE_FIRMWARE("amdgpu/vpe_6_1_3.bin");

#define VPE_THREAD1_UCODE_OFFSET	0x8000

#define regVPEC_COLLABORATE_CNTL				0x0013
#define regVPEC_COLLABORATE_CNTL_BASE_IDX			0
#define VPEC_COLLABORATE_CNTL__COLLABORATE_MODE_EN__SHIFT	0x0
#define VPEC_COLLABORATE_CNTL__COLLABORATE_MODE_EN_MASK		0x00000001L

#define regVPEC_COLLABORATE_CFG					0x0014
#define regVPEC_COLLABORATE_CFG_BASE_IDX			0
#define VPEC_COLLABORATE_CFG__MASTER_ID__SHIFT			0x0
#define VPEC_COLLABORATE_CFG__MASTER_EN__SHIFT			0x3
#define VPEC_COLLABORATE_CFG__SLAVE0_ID__SHIFT			0x4
#define VPEC_COLLABORATE_CFG__SLAVE0_EN__SHIFT			0x7
#define VPEC_COLLABORATE_CFG__MASTER_ID_MASK			0x00000007L
#define VPEC_COLLABORATE_CFG__MASTER_EN_MASK			0x00000008L
#define VPEC_COLLABORATE_CFG__SLAVE0_ID_MASK			0x00000070L
#define VPEC_COLLABORATE_CFG__SLAVE0_EN_MASK			0x00000080L

#define regVPEC_CNTL_6_1_1					0x0016
#define regVPEC_CNTL_6_1_1_BASE_IDX				0
#define regVPEC_QUEUE_RESET_REQ_6_1_1				0x002c
#define regVPEC_QUEUE_RESET_REQ_6_1_1_BASE_IDX			0
#define regVPEC_PUB_DUMMY2_6_1_1				0x004c
#define regVPEC_PUB_DUMMY2_6_1_1_BASE_IDX			0

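/* translate a VPE register offset into an absolute MMIO offset for instance @inst */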
static uint32_t vpe_v6_1_get_reg_offset(struct amdgpu_vpe *vpe, uint32_t inst, uint32_t offset)
{
	uint32_t base;

	base = vpe->ring.adev->reg_offset[VPE_HWIP][inst][0];

	return base + offset;
}

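/*
 * Halt or release the F32 microcontroller on every VPE instance by
 * toggling the HALT and TH1_RESET fields of VPEC_F32_CNTL.
 */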
static void vpe_v6_1_halt(struct amdgpu_vpe *vpe, bool halt)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	uint32_t i, f32_cntl;

	for (i = 0; i < vpe->num_instances; i++) {
		f32_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL));
		f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, HALT, halt ? 1 : 0);
		f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, TH1_RESET, halt ? 1 : 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL), f32_cntl);
	}
}

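/* register the VPE trap interrupt source with the interrupt handler */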
static int vpe_v6_1_irq_init(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
	int ret;

	ret = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VPE,
				VPE_6_1_SRCID__VPE_TRAP,
				&adev->vpe.trap_irq);
	if (ret)
		return ret;

	return 0;
}

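/*
 * Enable or disable collaborate mode, where instance 0 acts as master
 * and instance 1 as slave. No-op when the device does not support
 * collaborate mode.
 */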
static void vpe_v6_1_set_collaborate_mode(struct amdgpu_vpe *vpe, bool enable)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	uint32_t vpe_colla_cntl, vpe_colla_cfg, i;

	if (!vpe->collaborate_mode)
		return;

	for (i = 0; i < vpe->num_instances; i++) {
		vpe_colla_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CNTL));
		vpe_colla_cntl = REG_SET_FIELD(vpe_colla_cntl, VPEC_COLLABORATE_CNTL,
					       COLLABORATE_MODE_EN, enable ? 1 : 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CNTL), vpe_colla_cntl);

		vpe_colla_cfg = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CFG));
		vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, MASTER_ID, 0);
		vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, MASTER_EN, enable ? 1 : 0);
		vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, SLAVE0_ID, 1);
		vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, SLAVE0_EN, enable ? 1 : 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CFG), vpe_colla_cfg);
	}
}

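/*
 * Load the two VPE ucode images (thread 0 command, thread 1 control).
 * With PSP-managed loading, only the F32 release write is staged in
 * cmdbuf_cpu_addr and handed off to the PSP; otherwise the images are
 * written directly through the VPEC_UCODE_ADDR/DATA window while F32
 * is halted.
 */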
static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	const struct vpe_firmware_header_v1_0 *vpe_hdr;
	const __le32 *data;
	uint32_t ucode_offset[2], ucode_size[2];
	uint32_t i, j, size_dw;
	uint32_t ret;

	/* disable UMSCH_INT_ENABLE */
	for (j = 0; j < vpe->num_instances; j++) {
		if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
			ret = RREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL_6_1_1));
		else
			ret = RREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL));

		ret = REG_SET_FIELD(ret, VPEC_CNTL, UMSCH_INT_ENABLE, 0);

		if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
			WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL_6_1_1), ret);
		else
			WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL), ret);
	}

	/* setup collaborate mode */
	vpe_v6_1_set_collaborate_mode(vpe, true);
	/* setup DPM */
	if (amdgpu_vpe_configure_dpm(vpe))
		dev_warn(adev->dev, "VPE failed to enable DPM\n");

	/*
	 * For VPE 6.1.1, only the master's offset needs to be programmed;
	 * the PSP applies it to the slave as well. Instance 0 is used as
	 * the master here.
	 */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		uint32_t f32_offset, f32_cntl;

		f32_offset = vpe_get_reg_offset(vpe, 0, regVPEC_F32_CNTL);
		f32_cntl = RREG32(f32_offset);
		f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, HALT, 0);
		f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, TH1_RESET, 0);

		adev->vpe.cmdbuf_cpu_addr[0] = f32_offset;
		adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl;

		return amdgpu_vpe_psp_update_sram(adev);
	}

	vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;

	/* Thread 0 (command thread) ucode offset/size */
	ucode_offset[0] = le32_to_cpu(vpe_hdr->header.ucode_array_offset_bytes);
	ucode_size[0] = le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes);
	/* Thread 1 (control thread) ucode offset/size */
	ucode_offset[1] = le32_to_cpu(vpe_hdr->ctl_ucode_offset);
	ucode_size[1] = le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes);

	vpe_v6_1_halt(vpe, true);

	for (j = 0; j < vpe->num_instances; j++) {
		for (i = 0; i < 2; i++) {
			if (i > 0)
				WREG32(vpe_get_reg_offset(vpe, j, regVPEC_UCODE_ADDR), VPE_THREAD1_UCODE_OFFSET);
			else
				WREG32(vpe_get_reg_offset(vpe, j, regVPEC_UCODE_ADDR), 0);

			data = (const __le32 *)(adev->vpe.fw->data + ucode_offset[i]);
			size_dw = ucode_size[i] / sizeof(__le32);

			while (size_dw--) {
				if (amdgpu_emu_mode && size_dw % 500 == 0)
					msleep(1);
				WREG32(vpe_get_reg_offset(vpe, j, regVPEC_UCODE_DATA), le32_to_cpup(data++));
			}
		}
	}

	vpe_v6_1_halt(vpe, false);

	return 0;
}

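/*
 * Program the queue 0 ring buffer (size, rptr/wptr, writeback address,
 * base and doorbell) on every instance, enable the ring and IBs, then
 * run a ring test.
 */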
static int vpe_v6_1_ring_start(struct amdgpu_vpe *vpe)
{
	struct amdgpu_ring *ring = &vpe->ring;
	struct amdgpu_device *adev = ring->adev;
	uint32_t doorbell, doorbell_offset;
	uint32_t rb_bufsz, rb_cntl;
	uint32_t ib_cntl, i;
	int ret;

	for (i = 0; i < vpe->num_instances; i++) {
		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_PRIV, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_VMID, 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_CNTL), rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR), 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR_HI), 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR), 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR_HI), 0);

		/* set the wb address whether it's enabled or not */
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR_ADDR_LO),
		       lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR_ADDR_HI),
		       upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);

		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);

		ring->wptr = 0;

		/* before programming wptr to a smaller value, need to set minor_ptr_update first */
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 1);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
		/* set minor_ptr_update to 0 after wptr is programmed */
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 0);

		doorbell_offset = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL_OFFSET));
		doorbell_offset = REG_SET_FIELD(doorbell_offset, VPEC_QUEUE0_DOORBELL_OFFSET, OFFSET, ring->doorbell_index + i * 4);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL_OFFSET), doorbell_offset);

		doorbell = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL));
		doorbell = REG_SET_FIELD(doorbell, VPEC_QUEUE0_DOORBELL, ENABLE, ring->use_doorbell ? 1 : 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL), doorbell);

		adev->nbio.funcs->vpe_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index + i * 4, 4);

		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_ENABLE, 1);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_CNTL), rb_cntl);

		ib_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, VPEC_QUEUE0_IB_CNTL, IB_ENABLE, 1);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_IB_CNTL), ib_cntl);
	}

	ret = amdgpu_ring_test_helper(ring);
	if (ret)
		return ret;

	return 0;
}

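/*
 * Request a queue 0 reset on every instance, wait for the hardware to
 * clear the request bit, then mark the ring scheduler as not ready.
 */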
static int vpe_v_6_1_ring_stop(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	uint32_t queue_reset, i;
	int ret;

	for (i = 0; i < vpe->num_instances; i++) {
		if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
			queue_reset = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ_6_1_1));
		else
			queue_reset = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ));

		queue_reset = REG_SET_FIELD(queue_reset, VPEC_QUEUE_RESET_REQ, QUEUE0_RESET, 1);

		if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1)) {
			WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ_6_1_1), queue_reset);
			ret = SOC15_WAIT_ON_RREG(VPE, i, regVPEC_QUEUE_RESET_REQ_6_1_1, 0,
						 VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK);
		} else {
			WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ), queue_reset);
			ret = SOC15_WAIT_ON_RREG(VPE, i, regVPEC_QUEUE_RESET_REQ, 0,
						 VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK);
		}

		if (ret)
			dev_err(adev->dev, "VPE queue reset failed\n");
	}

	vpe->ring.sched.ready = false;

	return ret;
}

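/* enable or disable the trap interrupt via the TRAP_ENABLE field of VPEC_CNTL */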
static int vpe_v6_1_set_trap_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned int type,
				       enum amdgpu_interrupt_state state)
{
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint32_t vpe_cntl;

	if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
		vpe_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL_6_1_1));
	else
		vpe_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL));

	vpe_cntl = REG_SET_FIELD(vpe_cntl, VPEC_CNTL, TRAP_ENABLE,
				 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);

	if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
		WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL_6_1_1), vpe_cntl);
	else
		WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL), vpe_cntl);

	return 0;
}

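/* handle a VPE trap interrupt by processing the ring's fences */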
static int vpe_v6_1_process_trap_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	dev_dbg(adev->dev, "IH: VPE trap\n");

	switch (entry->client_id) {
	case SOC21_IH_CLIENTID_VPE:
		amdgpu_fence_process(&adev->vpe.ring);
		break;
	default:
		break;
	}

	return 0;
}

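/*
 * Record the per-version register offsets used by the common VPE code,
 * including the scratch (DUMMY) registers that carry the DPM settings.
 */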
static int vpe_v6_1_set_regs(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);

	vpe->regs.queue0_rb_rptr_lo = regVPEC_QUEUE0_RB_RPTR;
	vpe->regs.queue0_rb_rptr_hi = regVPEC_QUEUE0_RB_RPTR_HI;
	vpe->regs.queue0_rb_wptr_lo = regVPEC_QUEUE0_RB_WPTR;
	vpe->regs.queue0_rb_wptr_hi = regVPEC_QUEUE0_RB_WPTR_HI;
	vpe->regs.queue0_preempt = regVPEC_QUEUE0_PREEMPT;

	if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
		vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2_6_1_1;
	else
		vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2;

	vpe->regs.dpm_pratio = regVPEC_QUEUE6_DUMMY4;
	vpe->regs.dpm_request_interval = regVPEC_QUEUE5_DUMMY3;
	vpe->regs.dpm_decision_threshold = regVPEC_QUEUE5_DUMMY4;
	vpe->regs.dpm_busy_clamp_threshold = regVPEC_QUEUE7_DUMMY2;
	vpe->regs.dpm_idle_clamp_threshold = regVPEC_QUEUE7_DUMMY3;
	vpe->regs.dpm_request_lv = regVPEC_QUEUE7_DUMMY1;
	vpe->regs.context_indicator = regVPEC_QUEUE6_DUMMY3;

	return 0;
}

static const struct vpe_funcs vpe_v6_1_funcs = {
	.get_reg_offset = vpe_v6_1_get_reg_offset,
	.set_regs = vpe_v6_1_set_regs,
	.irq_init = vpe_v6_1_irq_init,
	.init_microcode = amdgpu_vpe_init_microcode,
	.load_microcode = vpe_v6_1_load_microcode,
	.ring_init = amdgpu_vpe_ring_init,
	.ring_start = vpe_v6_1_ring_start,
	.ring_stop = vpe_v_6_1_ring_stop,
	.ring_fini = amdgpu_vpe_ring_fini,
};

static const struct amdgpu_irq_src_funcs vpe_v6_1_trap_irq_funcs = {
	.set = vpe_v6_1_set_trap_irq_state,
	.process = vpe_v6_1_process_trap_irq,
};

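/* hook up the VPE 6.1 function table and trap interrupt handlers */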
void vpe_v6_1_set_funcs(struct amdgpu_vpe *vpe)
{
	vpe->funcs = &vpe_v6_1_funcs;
	vpe->trap_irq.funcs = &vpe_v6_1_trap_irq_funcs;
}