// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_mqd_manager.h"
#include "amdgpu_amdkfd.h"
#include "kfd_device_queue_manager.h"

/* Mapping queue priority to pipe priority, indexed by queue priority */
int pipe_priority_map[] = {
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH
};

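/*
 * The HIQ MQD is not allocated on its own: it aliases the start of the
 * per-device hiq_sdma_mqd GTT buffer owned by the device queue manager,
 * so only the kfd_mem_obj wrapper is allocated here.
 */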
struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q)
{
	struct kfd_mem_obj *mqd_mem_obj;

	mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!mqd_mem_obj)
		return NULL;

	mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem;
	mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr;
	mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr;

	return mqd_mem_obj;
}

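/*
 * SDMA MQDs are carved out of the same shared hiq_sdma_mqd buffer: the
 * per-XCC HIQ MQDs come first, followed by one SDMA MQD slot per
 * (engine, queue) pair. The offset below skips the HIQ region and then
 * indexes the flat SDMA MQD array.
 */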
struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev,
				      struct queue_properties *q)
{
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t offset;

	mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!mqd_mem_obj)
		return NULL;

	offset = (q->sdma_engine_id *
		  dev->kfd->device_info.num_sdma_queues_per_engine +
		  q->sdma_queue_id) *
		  dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;

	offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
		  NUM_XCC(dev->xcc_mask);

	mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem
				+ offset);
	mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;
	mqd_mem_obj->cpu_ptr = (uint32_t *)((uint64_t)
				dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);

	return mqd_mem_obj;
}

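/*
 * HIQ/SDMA MQDs live in the shared buffer owned by the device queue
 * manager, so only the kfd_mem_obj wrapper is freed here; the backing
 * GTT memory is released by the device queue manager.
 */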
void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
			struct kfd_mem_obj *mqd_mem_obj)
{
	WARN_ON(!mqd_mem_obj->gtt_mem);
	kfree(mqd_mem_obj);
}

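/*
 * Translate a user-supplied CU mask into per-shader-engine enable masks
 * (se_mask), spreading the enabled CUs symmetrically across all shader
 * engines and shader arrays; the bit layout is described in the comments
 * below.
 */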
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
		const uint32_t *cu_mask, uint32_t cu_mask_count,
		uint32_t *se_mask, uint32_t inst)
{
	struct amdgpu_cu_info *cu_info = &mm->dev->adev->gfx.cu_info;
	struct amdgpu_gfx_config *gfx_info = &mm->dev->adev->gfx.config;
	uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
	bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);
	uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
	int i, se, sh, cu, cu_bitmap_sh_mul, cu_inc = wgp_mode_req ? 2 : 1;
	uint32_t cu_active_per_node;
	int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask);
	int xcc_inst = inst + ffs(mm->dev->xcc_mask) - 1;

	cu_active_per_node = cu_info->number / mm->dev->kfd->num_nodes;
	if (cu_mask_count > cu_active_per_node)
		cu_mask_count = cu_active_per_node;

	/* Exceeding these bounds corrupts the stack and indicates a coding error.
	 * Returning with no CUs enabled will hang the queue, which should be
	 * attention grabbing.
	 */
	if (gfx_info->max_shader_engines > KFD_MAX_NUM_SE) {
		dev_err(mm->dev->adev->dev,
			"Exceeded KFD_MAX_NUM_SE, chip reports %d\n",
			gfx_info->max_shader_engines);
		return;
	}
	if (gfx_info->max_sh_per_se > KFD_MAX_NUM_SH_PER_SE) {
		dev_err(mm->dev->adev->dev,
			"Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
			gfx_info->max_sh_per_se * gfx_info->max_shader_engines);
		return;
	}

	cu_bitmap_sh_mul = (KFD_GC_VERSION(mm->dev) >= IP_VERSION(11, 0, 0) &&
			    KFD_GC_VERSION(mm->dev) < IP_VERSION(13, 0, 0)) ? 2 : 1;

	/* Count active CUs per SH.
	 *
	 * Some CUs in an SH may be disabled. HW expects disabled CUs to be
	 * represented in the high bits of each SH's enable mask (the upper and lower
	 * 16 bits of se_mask) and will take care of the actual distribution of
	 * disabled CUs within each SH automatically.
	 * Each half of se_mask must be filled only on bits 0-cu_per_sh[se][sh]-1.
	 *
	 * See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
	 * See note on GFX11 cu_bitmap layout in gfx_v11_0_get_cu_info.
	 */
	for (se = 0; se < gfx_info->max_shader_engines; se++)
		for (sh = 0; sh < gfx_info->max_sh_per_se; sh++)
			cu_per_sh[se][sh] = hweight32(
				cu_info->bitmap[xcc_inst][se % 4][sh + (se / 4) *
				cu_bitmap_sh_mul]);

	/* Symmetrically map cu_mask to all SEs & SHs:
	 * se_mask programs up to 2 SH in the upper and lower 16 bits.
	 *
	 * Examples
	 * Assuming 1 SH/SE, 4 SEs:
	 * cu_mask[0] bit0 -> se_mask[0] bit0
	 * cu_mask[0] bit1 -> se_mask[1] bit0
	 * ...
	 * cu_mask[0] bit4 -> se_mask[0] bit1
	 * ...
	 *
	 * Assuming 2 SH/SE, 4 SEs:
	 * cu_mask[0] bit0 -> se_mask[0] bit0 (SE0,SH0,CU0)
	 * cu_mask[0] bit1 -> se_mask[1] bit0 (SE1,SH0,CU0)
	 * ...
	 * cu_mask[0] bit4 -> se_mask[0] bit16 (SE0,SH1,CU0)
	 * cu_mask[0] bit5 -> se_mask[1] bit16 (SE1,SH1,CU0)
	 * ...
	 * cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
	 * ...
	 *
	 * For GFX 9.4.3, the following code only looks at the
	 * subset of the cu_mask corresponding to the inst parameter.
	 * If we have n XCCs under one GPU node:
	 * cu_mask[0] bit0 -> XCC0 se_mask[0] bit0 (XCC0,SE0,SH0,CU0)
	 * cu_mask[0] bit1 -> XCC1 se_mask[0] bit0 (XCC1,SE0,SH0,CU0)
	 * ...
	 * cu_mask[0] bitn -> XCCn se_mask[0] bit0 (XCCn,SE0,SH0,CU0)
	 * cu_mask[0] bit n+1 -> XCC0 se_mask[1] bit0 (XCC0,SE1,SH0,CU0)
	 *
	 * For example, if there are 6 XCCs under 1 KFD node, this code,
	 * running once for each inst, will look at the bits as:
	 * inst, inst + 6, inst + 12...
	 *
	 * First ensure all CUs are disabled, then enable user specified CUs.
	 */
	for (i = 0; i < gfx_info->max_shader_engines; i++)
		se_mask[i] = 0;

	i = inst;
	for (cu = 0; cu < 16; cu += cu_inc) {
		for (sh = 0; sh < gfx_info->max_sh_per_se; sh++) {
			for (se = 0; se < gfx_info->max_shader_engines; se++) {
				if (cu_per_sh[se][sh] > cu) {
					if (cu_mask[i / 32] & (en_mask << (i % 32)))
						se_mask[se] |= en_mask << (cu + sh * 16);
					i += inc;
					if (i >= cu_mask_count)
						return;
				}
			}
		}
	}
}

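/* Load the HIQ MQD into its HQD through the KIQ via the kfd2kgd interface. */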
int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
		uint32_t pipe_id, uint32_t queue_id,
		struct queue_properties *p, struct mm_struct *mms)
{
	return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
					      queue_id, p->doorbell_off, 0);
}

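/*
 * Tear down a compute HQD through the kfd2kgd hqd_destroy hook; the
 * kfd_preempt_type selects how outstanding waves are preempted.
 */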
int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
		enum kfd_preempt_type type, unsigned int timeout,
		uint32_t pipe_id, uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout,
					     pipe_id, queue_id, 0);
}

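/*
 * Free a compute MQD. MQDs backed by their own GTT allocation carry a
 * non-NULL gtt_mem handle; otherwise the MQD came from the GTT
 * sub-allocator and is returned there.
 */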
void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
		struct kfd_mem_obj *mqd_mem_obj)
{
	if (mqd_mem_obj->gtt_mem) {
		amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, &mqd_mem_obj->gtt_mem);
		kfree(mqd_mem_obj);
	} else {
		kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
	}
}

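/* Check whether the HQD slot is still occupied by the queue at queue_address. */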
bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
		uint64_t queue_address, uint32_t pipe_id,
		uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address,
						 pipe_id, queue_id, 0);
}

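/*
 * Load an SDMA MQD. The target engine and queue are taken from the MQD
 * itself, so pipe_id and queue_id are not passed down.
 */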
int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
		uint32_t pipe_id, uint32_t queue_id,
		struct queue_properties *p, struct mm_struct *mms)
{
	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
					       (uint32_t __user *)p->write_ptr,
					       mms);
}

/*
 * The preempt type is ignored here because there is only one way to
 * preempt an SDMA queue.
 */
int kfd_destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
		enum kfd_preempt_type type,
		unsigned int timeout, uint32_t pipe_id,
		uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}

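/* As with loading, SDMA occupancy is determined from the MQD alone. */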
bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
		uint64_t queue_address, uint32_t pipe_id,
		uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}

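/* Stride between consecutive per-XCC HIQ MQDs in the shared buffer. */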
uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev)
{
	return dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
}

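/*
 * Point mqd_mem_obj at the HIQ MQD slice for the given XCC. Only XCC 0
 * carries the gtt_mem handle, so the shared allocation is freed exactly
 * once.
 */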
void kfd_get_hiq_xcc_mqd(struct kfd_node *dev, struct kfd_mem_obj *mqd_mem_obj,
		uint32_t virtual_xcc_id)
{
	uint64_t offset;

	offset = kfd_hiq_mqd_stride(dev) * virtual_xcc_id;

	mqd_mem_obj->gtt_mem = (virtual_xcc_id == 0) ?
			dev->dqm->hiq_sdma_mqd.gtt_mem : NULL;
	mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;
	mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)
				dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);
}

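/* Default MQD stride: one mqd_size per queue, independent of queue properties. */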
uint64_t kfd_mqd_stride(struct mqd_manager *mm,
			struct queue_properties *q)
{
	return mm->mqd_size;
}

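/*
 * A non-zero doorbell ID left in the HIQ MQD identifies a queue that
 * failed to preempt. Log it and report the failure to the caller.
 */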
bool kfd_check_hiq_mqd_doorbell_id(struct kfd_node *node, uint32_t doorbell_id,
		uint32_t inst)
{
	if (doorbell_id) {
		struct device *dev = node->adev->dev;

		if (node->adev->xcp_mgr && node->adev->xcp_mgr->num_xcps > 0)
			dev_err(dev, "XCC %d: Queue preemption failed for queue with doorbell_id: %x\n",
				inst, doorbell_id);
		else
			dev_err(dev, "Queue preemption failed for queue with doorbell_id: %x\n",
				doorbell_id);
		return true;
	}

	return false;
}