/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"
#include "vid.h"

enum hqd_dequeue_request_type {
        NO_ACTION = 0,
        DRAIN_PIPE,
        RESET_WAVES
};

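/*
 * Select which ME/pipe/queue/VMID subsequent register accesses target.
 * lock_srbm() takes adev->srbm_mutex and holds it until the matching
 * unlock_srbm(), which restores SRBM_GFX_CNTL to its default (0).
 */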
static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
                        uint32_t queue, uint32_t vmid)
{
        uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

        mutex_lock(&adev->srbm_mutex);
        WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct amdgpu_device *adev)
{
        WREG32(mmSRBM_GFX_CNTL, 0);
        mutex_unlock(&adev->srbm_mutex);
}

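/*
 * Map a KFD pipe_id, which counts compute pipes across all MECs, to the
 * MEC/pipe pair expected by SRBM, then select that queue. MEID 0 is the
 * graphics ME, so the compute MECs start at 1 (hence the +1).
 */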
static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
                        uint32_t queue_id)
{
        uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(adev, mec, pipe, queue_id, 0);
}

static void release_queue(struct amdgpu_device *adev)
{
        unlock_srbm(adev);
}

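/* Program the per-VMID shader memory configuration and aperture registers. */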
static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
                                        uint32_t sh_mem_config,
                                        uint32_t sh_mem_ape1_base,
                                        uint32_t sh_mem_ape1_limit,
                                        uint32_t sh_mem_bases, uint32_t inst)
{
        lock_srbm(adev, 0, 0, 0, vmid);

        WREG32(mmSH_MEM_CONFIG, sh_mem_config);
        WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
        WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
        WREG32(mmSH_MEM_BASES, sh_mem_bases);

        unlock_srbm(adev);
}

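/*
 * Associate a PASID with a VMID in the ATC, and mirror the mapping into
 * the IH block. A pasid of 0 clears the mapping (VALID bit left unset).
 */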
static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
                                        unsigned int vmid, uint32_t inst)
{
        /*
         * We have to assume that there is no outstanding mapping.
         * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
         * a mapping is in progress or because a mapping finished
         * and the SW cleared it.
         * So the protocol is to always wait & clear.
         */
        uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
                        ATC_VMID0_PASID_MAPPING__VALID_MASK;

        WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

        while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
                cpu_relax();
        WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

        /* Program the VMID->PASID mapping into the IH block as well */
        WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

        return 0;
}

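/* Enable time-stamp and opcode-error interrupts for a compute pipe. */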
static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
                                uint32_t inst)
{
        uint32_t mec;
        uint32_t pipe;

        mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(adev, mec, pipe, 0, 0);

        WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
                        CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

        unlock_srbm(adev);

        return 0;
}

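/* Compute the register offset of an SDMA RLC queue from its engine/queue id. */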
static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m)
{
        uint32_t retval;

        retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
                m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;

        pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
                m->sdma_engine_id, m->sdma_queue_id, retval);

        return retval;
}

static inline struct vi_mqd *get_mqd(void *mqd)
{
        return (struct vi_mqd *)mqd;
}

static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
        return (struct vi_sdma_mqd *)mqd;
}

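/*
 * Load an HQD from its MQD and activate it. The MQD layout mirrors the
 * HQD register file, so most registers are programmed with simple copy
 * loops; the doorbell, write pointer, EOP and ACTIVE registers need
 * special handling (see the comments below).
 */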
static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
                        uint32_t pipe_id, uint32_t queue_id,
                        uint32_t __user *wptr, uint32_t wptr_shift,
                        uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
{
        struct vi_mqd *m;
        uint32_t *mqd_hqd;
        uint32_t reg, wptr_val, data;
        bool valid_wptr = false;

        m = get_mqd(mqd);

        acquire_queue(adev, pipe_id, queue_id);

        /* The HIQ is set up during driver init with VMID 0 */
        if (m->cp_hqd_vmid == 0) {
                uint32_t value, mec, pipe;

                mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
                pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

                pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
                        mec, pipe, queue_id);
                value = RREG32(mmRLC_CP_SCHEDULERS);
                value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
                        ((mec << 5) | (pipe << 3) | queue_id | 0x80));
                WREG32(mmRLC_CP_SCHEDULERS, value);
        }

        /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
        mqd_hqd = &m->cp_mqd_base_addr_lo;

        for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
                WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

        /* Tonga errata: EOP RPTR/WPTR should be left unmodified.
         * This is safe since EOP RPTR==WPTR for any inactive HQD
         * on ASICs that do not support context-save.
         * EOP writes/reads can start anywhere in the ring.
         */
        if (adev->asic_type != CHIP_TONGA) {
                WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
                WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
                WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
        }

        for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
                WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

        /* Copy userspace write pointer value to register.
         * Activate doorbell logic to monitor subsequent changes.
         */
        data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
                             CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

        /* read_user_ptr may take the mm->mmap_lock.
         * release srbm_mutex to avoid circular dependency between
         * srbm_mutex->mmap_lock->reservation_ww_class_mutex->srbm_mutex.
         */
        release_queue(adev);
        valid_wptr = read_user_wptr(mm, wptr, wptr_val);
        acquire_queue(adev, pipe_id, queue_id);
        if (valid_wptr)
                WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

        data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
        WREG32(mmCP_HQD_ACTIVE, data);

        release_queue(adev);

        return 0;
}

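/*
 * Snapshot the HQD registers of a queue into a kmalloc'd array of
 * (register offset, value) pairs; the caller owns and frees *dump.
 */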
static int kgd_hqd_dump(struct amdgpu_device *adev,
                        uint32_t pipe_id, uint32_t queue_id,
                        uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
{
        uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
#define DUMP_REG(addr) do {                             \
                if (WARN_ON_ONCE(i >= HQD_N_REGS))      \
                        break;                          \
                (*dump)[i][0] = (addr) << 2;            \
                (*dump)[i++][1] = RREG32(addr);         \
        } while (0)

        *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
        if (*dump == NULL)
                return -ENOMEM;

        acquire_queue(adev, pipe_id, queue_id);

        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

        for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
                DUMP_REG(reg);

        release_queue(adev);

        WARN_ON_ONCE(i != HQD_N_REGS);
        *n_regs = i;

        return 0;
}

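/*
 * Load an SDMA RLC queue from its MQD: disable the ring buffer, wait for
 * the engine to idle, then program the doorbell, pointers and ring base
 * before re-enabling the ring buffer.
 */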
static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
                        uint32_t __user *wptr, struct mm_struct *mm)
{
        struct vi_sdma_mqd *m;
        unsigned long end_jiffies;
        uint32_t sdma_rlc_reg_offset;
        uint32_t data;

        m = get_sdma_mqd(mqd);
        sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
                m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

        end_jiffies = msecs_to_jiffies(2000) + jiffies;
        while (true) {
                data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("SDMA RLC not idle in %s\n", __func__);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
                             ENABLE, 1);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
                m->sdmax_rlcx_rb_rptr);

        if (read_user_wptr(mm, wptr, data))
                WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
        else
                WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
                        m->sdmax_rlcx_rb_rptr);

        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
                m->sdmax_rlcx_virtual_addr);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
                m->sdmax_rlcx_rb_base_hi);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
                m->sdmax_rlcx_rb_rptr_addr_lo);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
                m->sdmax_rlcx_rb_rptr_addr_hi);

        data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
                             RB_ENABLE, 1);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

        return 0;
}

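/* Snapshot the SDMA RLC queue registers, mirroring kgd_hqd_dump() above. */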
static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
                        uint32_t engine_id, uint32_t queue_id,
                        uint32_t (**dump)[2], uint32_t *n_regs)
{
        uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
                queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
        uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)

        *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
        if (*dump == NULL)
                return -ENOMEM;

        for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
             reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
             reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
             reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
             reg++)
                DUMP_REG(sdma_offset + reg);

        WARN_ON_ONCE(i != HQD_N_REGS);
        *n_regs = i;

        return 0;
}

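/*
 * A queue slot is occupied if the HQD is active and its ring base matches
 * the given queue address. The PQ base registers hold the address in
 * 256-byte units, hence the >> 8.
 */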
static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
                        uint64_t queue_address, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t inst)
{
        uint32_t act;
        bool retval = false;
        uint32_t low, high;

        acquire_queue(adev, pipe_id, queue_id);
        act = RREG32(mmCP_HQD_ACTIVE);
        if (act) {
                low = lower_32_bits(queue_address >> 8);
                high = upper_32_bits(queue_address >> 8);

                if (low == RREG32(mmCP_HQD_PQ_BASE) &&
                    high == RREG32(mmCP_HQD_PQ_BASE_HI))
                        retval = true;
        }
        release_queue(adev);
        return retval;
}

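/* An SDMA queue is occupied if its ring buffer is still enabled. */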
static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
        struct vi_sdma_mqd *m;
        uint32_t sdma_rlc_reg_offset;
        uint32_t sdma_rlc_rb_cntl;

        m = get_sdma_mqd(mqd);
        sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

        sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

        if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
                return true;

        return false;
}

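/*
 * Preempt or reset a queue: issue a dequeue request of the requested type
 * and wait up to utimeout milliseconds for the HQD to go inactive. Includes
 * a workaround for dequeues that are unsafe while the IQ timer is active.
 */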
static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
                        enum kfd_preempt_type reset_type,
                        unsigned int utimeout, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t inst)
{
        uint32_t temp;
        enum hqd_dequeue_request_type type;
        unsigned long flags, end_jiffies;
        int retry;
        struct vi_mqd *m = get_mqd(mqd);

        if (amdgpu_in_reset(adev))
                return -EIO;

        acquire_queue(adev, pipe_id, queue_id);

        if (m->cp_hqd_vmid == 0)
                WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

        switch (reset_type) {
        case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
                type = DRAIN_PIPE;
                break;
        case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
                type = RESET_WAVES;
                break;
        default:
                type = DRAIN_PIPE;
                break;
        }

        /* Workaround: If IQ timer is active and the wait time is close to or
         * equal to 0, dequeueing is not safe. Wait until either the wait time
         * is larger or the timer is cleared. Also ensure that IQ_REQ_PEND is
         * cleared before continuing, and that wait times are set to at least
         * 0x3.
         */
        local_irq_save(flags);
        preempt_disable();
        retry = 5000; /* wait for 500 usecs at maximum */
        while (true) {
                temp = RREG32(mmCP_HQD_IQ_TIMER);
                if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
                        pr_debug("HW is processing IQ\n");
                        goto loop;
                }
                if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
                        if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
                                        == 3) /* SEM-rearm is safe */
                                break;
                        /* Wait time 3 is safe for CP, but our MMIO read/write
                         * time is close to 1 microsecond, so check for 10 to
                         * leave more buffer room
                         */
                        if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
                                        >= 10)
                                break;
                        pr_debug("IQ timer is active\n");
                } else
                        break;
loop:
                if (!retry) {
                        pr_err("CP HQD IQ timer status time out\n");
                        break;
                }
                ndelay(100);
                --retry;
        }
        retry = 1000;
        while (true) {
                temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
                if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
                        break;
                pr_debug("Dequeue request is pending\n");

                if (!retry) {
                        pr_err("CP HQD dequeue request time out\n");
                        break;
                }
                ndelay(100);
                --retry;
        }
        local_irq_restore(flags);
        preempt_enable();

        WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

        end_jiffies = (utimeout * HZ / 1000) + jiffies;
        while (true) {
                temp = RREG32(mmCP_HQD_ACTIVE);
                if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("cp queue preemption time out.\n");
                        release_queue(adev);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        release_queue(adev);
        return 0;
}

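/*
 * Stop an SDMA queue: disable its ring buffer, wait for the engine to
 * idle, then save the read pointer back into the MQD so the queue can
 * be resumed later.
 */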
static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
                                unsigned int utimeout)
{
        struct vi_sdma_mqd *m;
        uint32_t sdma_rlc_reg_offset;
        uint32_t temp;
        unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

        m = get_sdma_mqd(mqd);
        sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

        temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
        temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

        while (true) {
                temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("SDMA RLC not idle in %s\n", __func__);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
                RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
                SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

        m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);

        return 0;
}

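/* Return the PASID mapped to a VMID; true if the mapping is valid. */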
static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
                                        uint8_t vmid, uint16_t *p_pasid)
{
        uint32_t value;

        value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
        *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

        return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

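/*
 * Issue an SQ_CMD wave-control command to the shader engines selected by
 * gfx_index_val, then restore GRBM_GFX_INDEX to broadcast mode.
 */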
static int kgd_wave_control_execute(struct amdgpu_device *adev,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd, uint32_t inst)
{
        uint32_t data = 0;

        mutex_lock(&adev->grbm_idx_mutex);

        WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
        WREG32(mmSQ_CMD, sq_cmd);

        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                INSTANCE_BROADCAST_WRITES, 1);
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                SH_BROADCAST_WRITES, 1);
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                SE_BROADCAST_WRITES, 1);

        WREG32(mmGRBM_GFX_INDEX, data);
        mutex_unlock(&adev->grbm_idx_mutex);

        return 0;
}

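/* Program the per-VMID scratch (private memory) backing base address. */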
static void set_scratch_backing_va(struct amdgpu_device *adev,
                                        uint64_t va, uint32_t vmid)
{
        lock_srbm(adev, 0, 0, 0, vmid);
        WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
        unlock_srbm(adev);
}

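/*
 * Set the page-table base for a KFD VMID. The KFD VMIDs share one
 * base-address register array starting at VM_CONTEXT8_PAGE_TABLE_BASE_ADDR,
 * hence the "vmid - 8" indexing.
 */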
static void set_vm_context_page_table_base(struct amdgpu_device *adev,
                        uint32_t vmid, uint64_t page_table_base)
{
        if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
                pr_err("trying to set page table base for wrong VMID\n");
                return;
        }
        WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
                lower_32_bits(page_table_base));
}

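/* SDMA doorbell reporting is not implemented on GFX8; always returns 0. */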
static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
                                        int engine, int queue)
{
        return 0;
}

const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
        .program_sh_mem_settings = kgd_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
        .init_interrupts = kgd_init_interrupts,
        .hqd_load = kgd_hqd_load,
        .hqd_sdma_load = kgd_hqd_sdma_load,
        .hqd_dump = kgd_hqd_dump,
        .hqd_sdma_dump = kgd_hqd_sdma_dump,
        .hqd_is_occupied = kgd_hqd_is_occupied,
        .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
        .wave_control_execute = kgd_wave_control_execute,
        .get_atc_vmid_pasid_mapping_info =
                        get_atc_vmid_pasid_mapping_info,
        .set_scratch_backing_va = set_scratch_backing_va,
        .set_vm_context_page_table_base = set_vm_context_page_table_base,
        .hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
};