/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free a PASID from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1),
					(1U << bits) - 1, GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}
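
/*
 * Worked example of the width fallback above (illustrative only):
 * amdgpu_pasid_alloc(16) first tries ida_alloc_range() with the range
 * [0x8000, 0xffff]. If that width is exhausted it retries with
 * [0x4000, 0x7fff], then [0x2000, 0x3fff], and so on down to [1, 1].
 * Trying the top half of each width first keeps small PASID values
 * free for devices that only support narrow PASIDs.
 */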

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(u32 pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_free(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       u32 pasid)
{
	struct amdgpu_pasid_cb *cb;
	struct dma_fence *fence;
	int r;

	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto fallback;

	if (!fence) {
		amdgpu_pasid_free(pasid);
		return;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete, as last resort
	 * block for all the fences to complete.
	 */
	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
			      false, MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}
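
/*
 * Illustrative caller sketch (simplified, not the exact call site; the
 * reservation object of the VM's root page directory BO is assumed):
 * free a VM's PASID only once all bookkeeping fences on the root PD
 * have signaled.
 *
 *	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
 *
 *	amdgpu_pasid_free_delayed(resv, vm->pasid);
 */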

/*
 * VMID manager
 *
 * VMIDs are per VMHUB identifiers used for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if GPU reset occurred since last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/* Check if we need to switch to another set of resources */
static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
					  struct amdgpu_job *job)
{
	return id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size;
}

/* Check if the id is compatible with the job */
static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
				   struct amdgpu_job *job)
{
	return id->pd_gpu_addr == job->vm_pd_addr &&
		!amdgpu_vmid_gds_switch_needed(id, job);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @ring: ring we want to submit job to
 * @idle: resulting idle VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to find an idle VMID. If none is idle, a fence to wait for is
 * returned in @fence and *@idle is set to NULL. Returns 0.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
				 struct amdgpu_vmid **idle,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	/* If anybody is waiting for a VMID let everybody wait for fairness */
	if (!dma_fence_is_signaled(ring->vmid_wait)) {
		*fence = dma_fence_get(ring->vmid_wait);
		return 0;
	}

	/* Check if we have an idle VMID */
	list_for_each_entry_reverse((*idle), &id_mgr->ids_lru, list) {
		/* Don't use per engine and per process VMID at the same time */
		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
			NULL : ring;

		*fence = amdgpu_sync_peek_fence(&(*idle)->active, r);
		if (!(*fence))
			return 0;
	}

	/*
	 * If we can't find an idle VMID to use, wait on a fence from the
	 * least recently used in the hope that it will be available soon.
	 */
	*idle = NULL;
	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = dma_fence_get(*fence);

	/* This is the reference we return */
	dma_fence_get(*fence);
	return 0;
}
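
/*
 * Note on the reverse walk above: VMIDs are moved to the tail of
 * ids_lru when they are grabbed (see amdgpu_vmid_grab()), so iterating
 * in reverse checks the most recently used IDs first and, when every
 * ID is still busy, leaves @fence pointing at the least recently used
 * entry at the head of the list.
 */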

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id,
				     struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	bool needs_flush = vm->use_cpu_for_update;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	*id = vm->reserved_vmid[vmhub];
	if ((*id)->owner != vm->immediate.fence_context ||
	    !amdgpu_vmid_compatible(*id, job) ||
	    (*id)->flushed_updates < updates ||
	    !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush)))
		needs_flush = true;

	if ((*id)->owner != vm->immediate.fence_context ||
	    (!adev->vm_manager.concurrent_flush && needs_flush)) {
		struct dma_fence *tmp;

		/* Don't use per engine and per process VMID at the
		 * same time
		 */
		if (adev->vm_manager.concurrent_flush)
			ring = NULL;

		/* to prevent one context being starved by another */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			*fence = dma_fence_get(tmp);
			return 0;
		}
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,
			      GFP_ATOMIC);
	if (r)
		return r;

	job->vm_needs_flush = needs_flush;
	job->spm_update_needed = true;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if (!amdgpu_vmid_compatible(*id, job))
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		if ((*id)->flushed_updates < updates)
			needs_flush = true;

		if (needs_flush && !adev->vm_manager.concurrent_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active,
				      &job->base.s_fence->finished,
				      GFP_ATOMIC);
		if (r)
			return r;

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job that wants to use the VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_job *job, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(ring, &idle, fence);
	if (r || !idle)
		goto error;

	if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, job, &id);
		if (r)
			goto error;

		if (!id) {
			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active,
					      &job->base.s_fence->finished,
					      GFP_ATOMIC);
			if (r)
				goto error;

			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
	if (job->vm_needs_flush) {
		id->flushed_updates = amdgpu_vm_tlb_seq(vm);
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;

	id->gds_base = job->gds_base;
	id->gds_size = job->gds_size;
	id->gws_base = job->gws_base;
	id->gws_size = job->gws_size;
	id->oa_base = job->oa_base;
	id->oa_size = job->oa_size;
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}
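
/*
 * Summary of the selection order implemented above (derived from the
 * code, for reference): with id_mgr->lock held,
 *
 *  1. amdgpu_vmid_grab_idle() - bail out early with a wait fence if
 *     nothing is idle;
 *  2. amdgpu_vmid_grab_reserved() - if the VM reserved a VMID on this
 *     hub, only that one may be used;
 *  3. amdgpu_vmid_grab_used() - otherwise prefer a VMID this VM used
 *     before, which can often skip the flush;
 *  4. fall back to the idle VMID from step 1, which always requires a
 *     full flush.
 */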

/*
 * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
 * @vm: the VM to check
 * @vmhub: the VMHUB which will be used
 *
 * Returns: True if the VM will use a reserved VMID.
 */
bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
{
	return vm->reserved_vmid[vmhub];
}

/*
 * amdgpu_vmid_alloc_reserved - reserve a specific VMID for this vm
 * @adev: amdgpu device structure
 * @vm: the VM to reserve an ID for
 * @vmhub: the VMHUB which should be used
 *
 * Mostly used to have a reserved VMID for debugging and SPM.
 *
 * Returns: 0 for success, -ENOENT if another VM already holds the
 * reserved VMID on this hub.
 */
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (id_mgr->reserved_vmid) {
		r = -ENOENT;
		goto unlock;
	}
	/* Remove from normal round robin handling */
	id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
	list_del_init(&id->list);
	vm->reserved_vmid[vmhub] = id;
	id_mgr->reserved_vmid = true;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}

/*
 * amdgpu_vmid_free_reserved - free up a reserved VMID again
 * @adev: amdgpu device structure
 * @vm: the VM with the reserved ID
 * @vmhub: the VMHUB which should be used
 */
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			 &id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		id_mgr->reserved_vmid = false;
	}
	mutex_unlock(&id_mgr->lock);
}
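
/*
 * Illustrative pairing of the two functions above (simplified; the
 * real users are debugging/SPM paths, and AMDGPU_GFXHUB(0) is assumed
 * as the hub here):
 *
 *	if (!amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0))) {
 *		// submissions from vm now reuse one dedicated VMID
 *		amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
 *	}
 */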

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force a switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);

		/* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
			/* manage only VMIDs not used by KFD */
			id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
		else if (AMDGPU_IS_MMHUB0(i) ||
			 AMDGPU_IS_MMHUB1(i))
			id_mgr->num_ids = 16;
		else
			/* manage only VMIDs not used by KFD */
			id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}
| 619 | |