| 1 | /* |
| 2 | * Copyright 2018 Advanced Micro Devices, Inc. |
| 3 | * All Rights Reserved. |
| 4 | * |
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 6 | * copy of this software and associated documentation files (the |
| 7 | * "Software"), to deal in the Software without restriction, including |
| 8 | * without limitation the rights to use, copy, modify, merge, publish, |
| 9 | * distribute, sub license, and/or sell copies of the Software, and to |
| 10 | * permit persons to whom the Software is furnished to do so, subject to |
| 11 | * the following conditions: |
| 12 | * |
| 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
| 16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
| 17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
| 18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
| 19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 20 | * |
| 21 | * The above copyright notice and this permission notice (including the |
| 22 | * next paragraph) shall be included in all copies or substantial portions |
| 23 | * of the Software. |
| 24 | * |
| 25 | */ |
| 26 | #ifndef __AMDGPU_GMC_H__ |
| 27 | #define __AMDGPU_GMC_H__ |
| 28 | |
| 29 | #include <linux/types.h> |
| 30 | |
| 31 | #include "amdgpu_irq.h" |
| 32 | #include "amdgpu_xgmi.h" |
| 33 | #include "amdgpu_ras.h" |
| 34 | |
| 35 | /* VA hole for 48bit addresses on Vega10 */ |
| 36 | #define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL |
| 37 | #define AMDGPU_GMC_HOLE_END 0xffff800000000000ULL |
| 38 | |
| 39 | /* |
| 40 | * Hardware is programmed as if the hole doesn't exists with start and end |
| 41 | * address values. |
| 42 | * |
| 43 | * This mask is used to remove the upper 16bits of the VA and so come up with |
| 44 | * the linear addr value. |
| 45 | */ |
| 46 | #define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL |
| 47 | |
| 48 | /* |
| 49 | * Ring size as power of two for the log of recent faults. |
| 50 | */ |
| 51 | #define AMDGPU_GMC_FAULT_RING_ORDER 8 |
| 52 | #define AMDGPU_GMC_FAULT_RING_SIZE (1 << AMDGPU_GMC_FAULT_RING_ORDER) |
| 53 | |
| 54 | /* |
| 55 | * Hash size as power of two for the log of recent faults |
| 56 | */ |
| 57 | #define AMDGPU_GMC_FAULT_HASH_ORDER 8 |
| 58 | #define AMDGPU_GMC_FAULT_HASH_SIZE (1 << AMDGPU_GMC_FAULT_HASH_ORDER) |
| 59 | |
| 60 | /* |
| 61 | * Number of IH timestamp ticks until a fault is considered handled |
| 62 | */ |
| 63 | #define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL |
| 64 | |
| 65 | /* XNACK flags */ |
| 66 | #define AMDGPU_GMC_XNACK_FLAG_CHAIN BIT(0) |
| 67 | |
| 68 | struct firmware; |
| 69 | |
/* Memory (NPS) partition modes supported by the GMC.
 * The enumerator value equals the number of partitions
 * (NPS4 == 4 partitions, etc.); 0 means the mode is unknown.
 */
enum amdgpu_memory_partition {
	UNKNOWN_MEMORY_PARTITION_MODE = 0,
	AMDGPU_NPS1_PARTITION_MODE = 1,
	AMDGPU_NPS2_PARTITION_MODE = 2,
	AMDGPU_NPS3_PARTITION_MODE = 3,
	AMDGPU_NPS4_PARTITION_MODE = 4,
	AMDGPU_NPS6_PARTITION_MODE = 6,
	AMDGPU_NPS8_PARTITION_MODE = 8,
};
| 79 | |
| 80 | #define AMDGPU_ALL_NPS_MASK \ |
| 81 | (BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS2_PARTITION_MODE) | \ |
| 82 | BIT(AMDGPU_NPS3_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE) | \ |
| 83 | BIT(AMDGPU_NPS6_PARTITION_MODE) | BIT(AMDGPU_NPS8_PARTITION_MODE)) |
| 84 | |
| 85 | #define AMDGPU_GMC_INIT_RESET_NPS BIT(0) |
| 86 | |
| 87 | #define AMDGPU_MAX_MEM_RANGES 8 |
| 88 | |
| 89 | #define AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY 0x80 |
| 90 | #define AMDGPU_GMC9_FAULT_SOURCE_DATA_READ 0x40 |
| 91 | #define AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE 0x20 |
| 92 | #define AMDGPU_GMC9_FAULT_SOURCE_DATA_EXE 0x10 |
| 93 | |
| 94 | /* |
| 95 | * GMC page fault information |
| 96 | */ |
| 97 | struct amdgpu_gmc_fault { |
| 98 | uint64_t timestamp:48; |
| 99 | uint64_t next:AMDGPU_GMC_FAULT_RING_ORDER; |
| 100 | atomic64_t key; |
| 101 | uint64_t timestamp_expiry:48; |
| 102 | }; |
| 103 | |
| 104 | /* |
| 105 | * VMHUB structures, functions & helpers |
| 106 | */ |
| 107 | struct amdgpu_vmhub_funcs { |
| 108 | void (*print_l2_protection_fault_status)(struct amdgpu_device *adev, |
| 109 | uint32_t status); |
| 110 | uint32_t (*get_invalidate_req)(unsigned int vmid, uint32_t flush_type); |
| 111 | }; |
| 112 | |
/* Per-hub register offsets and behavior flags for one VM hub instance.
 * All uint32_t fields below hold register offsets, not register values.
 */
struct amdgpu_vmhub {
	uint32_t	ctx0_ptb_addr_lo32;	/* context0 page table base, low 32 bits */
	uint32_t	ctx0_ptb_addr_hi32;	/* context0 page table base, high 32 bits */
	uint32_t	vm_inv_eng0_sem;	/* invalidation engine 0 semaphore */
	uint32_t	vm_inv_eng0_req;	/* invalidation engine 0 request */
	uint32_t	vm_inv_eng0_ack;	/* invalidation engine 0 acknowledge */
	uint32_t	vm_context0_cntl;	/* VM context 0 control */
	uint32_t	vm_l2_pro_fault_status;	/* L2 protection fault status */
	uint32_t	vm_l2_pro_fault_cntl;	/* L2 protection fault control */

	/*
	 * store the register distances between two continuous context domain
	 * and invalidation engine.
	 */
	uint32_t	ctx_distance;
	uint32_t	ctx_addr_distance; /* include LO32/HI32 */
	uint32_t	eng_distance;
	uint32_t	eng_addr_distance; /* include LO32/HI32 */

	uint32_t	vm_cntx_cntl;
	uint32_t	vm_cntx_cntl_vm_fault;	/* fault-enable bits within context cntl */
	uint32_t	vm_l2_bank_select_reserved_cid2;

	uint32_t	vm_contexts_disable;

	/* some ASICs need a workaround for SDMA-initiated invalidations */
	bool	sdma_invalidation_workaround;

	const struct amdgpu_vmhub_funcs *vmhub_funcs;
};
| 142 | |
| 143 | /* |
| 144 | * GPU MC structures, functions & helpers |
| 145 | */ |
| 146 | struct amdgpu_gmc_funcs { |
| 147 | /* flush the vm tlb via mmio */ |
| 148 | void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid, |
| 149 | uint32_t vmhub, uint32_t flush_type); |
| 150 | /* flush the vm tlb via pasid */ |
| 151 | void (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid, |
| 152 | uint32_t flush_type, bool all_hub, |
| 153 | uint32_t inst); |
| 154 | /* flush the vm tlb via ring */ |
| 155 | uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid, |
| 156 | uint64_t pd_addr); |
| 157 | /* Change the VMID -> PASID mapping */ |
| 158 | void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid, |
| 159 | unsigned pasid); |
| 160 | /* enable/disable PRT support */ |
| 161 | void (*set_prt)(struct amdgpu_device *adev, bool enable); |
| 162 | /* get the pde for a given mc addr */ |
| 163 | void (*get_vm_pde)(struct amdgpu_device *adev, int level, |
| 164 | u64 *dst, u64 *flags); |
| 165 | /* get the pte flags to use for PTEs */ |
| 166 | void (*get_vm_pte)(struct amdgpu_device *adev, |
| 167 | struct amdgpu_vm *vm, |
| 168 | struct amdgpu_bo *bo, |
| 169 | uint32_t vm_flags, |
| 170 | uint64_t *pte_flags); |
| 171 | /* override per-page pte flags */ |
| 172 | void (*override_vm_pte_flags)(struct amdgpu_device *dev, |
| 173 | struct amdgpu_vm *vm, |
| 174 | uint64_t addr, uint64_t *flags); |
| 175 | /* get the amount of memory used by the vbios for pre-OS console */ |
| 176 | unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev); |
| 177 | /* get the DCC buffer alignment */ |
| 178 | unsigned int (*get_dcc_alignment)(struct amdgpu_device *adev); |
| 179 | |
| 180 | enum amdgpu_memory_partition (*query_mem_partition_mode)( |
| 181 | struct amdgpu_device *adev); |
| 182 | /* Request NPS mode */ |
| 183 | int (*request_mem_partition_mode)(struct amdgpu_device *adev, |
| 184 | int nps_mode); |
| 185 | bool (*need_reset_on_init)(struct amdgpu_device *adev); |
| 186 | }; |
| 187 | |
/* Describes one memory partition, either as a pfn range or as a NUMA node.
 * Which union member is valid depends on how the partition was discovered
 * (range-based vs. NUMA-based configuration).
 */
struct amdgpu_mem_partition_info {
	union {
		struct {
			uint32_t fpfn;	/* first page frame number of the range */
			uint32_t lpfn;	/* last page frame number of the range */
		} range;
		struct {
			int node;	/* NUMA node id backing this partition */
		} numa;
	};
	uint64_t size;	/* partition size in bytes */
};
| 200 | |
| 201 | #define INVALID_PFN -1 |
| 202 | |
/* One memory range as reported by the platform/firmware for NPS discovery
 * (see amdgpu_gmc_get_nps_memranges()).
 */
struct amdgpu_gmc_memrange {
	uint64_t base_address;	/* start of the range */
	uint64_t limit_address;	/* inclusive end of the range */
	uint32_t flags;
	int nid_mask;	/* mask of NUMA node ids associated with this range */
};
| 209 | |
/* Where to place the GART aperture within the MC address space
 * (see amdgpu_gmc_gart_location()).
 */
enum amdgpu_gart_placement {
	AMDGPU_GART_PLACEMENT_BEST_FIT = 0,	/* pick the placement that fits best */
	AMDGPU_GART_PLACEMENT_HIGH,		/* place at the top of the address space */
	AMDGPU_GART_PLACEMENT_LOW,		/* place at the bottom of the address space */
};
| 215 | |
| 216 | struct amdgpu_gmc { |
| 217 | /* FB's physical address in MMIO space (for CPU to |
| 218 | * map FB). This is different compared to the agp/ |
| 219 | * gart/vram_start/end field as the later is from |
| 220 | * GPU's view and aper_base is from CPU's view. |
| 221 | */ |
| 222 | resource_size_t aper_size; |
| 223 | resource_size_t aper_base; |
| 224 | /* for some chips with <= 32MB we need to lie |
| 225 | * about vram size near mc fb location */ |
| 226 | u64 mc_vram_size; |
| 227 | u64 visible_vram_size; |
| 228 | /* AGP aperture start and end in MC address space |
| 229 | * Driver find a hole in the MC address space |
| 230 | * to place AGP by setting MC_VM_AGP_BOT/TOP registers |
| 231 | * Under VMID0, logical address == MC address. AGP |
| 232 | * aperture maps to physical bus or IOVA addressed. |
| 233 | * AGP aperture is used to simulate FB in ZFB case. |
| 234 | * AGP aperture is also used for page table in system |
| 235 | * memory (mainly for APU). |
| 236 | * |
| 237 | */ |
| 238 | u64 agp_size; |
| 239 | u64 agp_start; |
| 240 | u64 agp_end; |
| 241 | /* GART aperture start and end in MC address space |
| 242 | * Driver find a hole in the MC address space |
| 243 | * to place GART by setting VM_CONTEXT0_PAGE_TABLE_START/END_ADDR |
| 244 | * registers |
| 245 | * Under VMID0, logical address inside GART aperture will |
| 246 | * be translated through gpuvm gart page table to access |
| 247 | * paged system memory |
| 248 | */ |
| 249 | u64 gart_size; |
| 250 | u64 gart_start; |
| 251 | u64 gart_end; |
| 252 | /* Frame buffer aperture of this GPU device. Different from |
| 253 | * fb_start (see below), this only covers the local GPU device. |
| 254 | * If driver uses FB aperture to access FB, driver get fb_start from |
| 255 | * MC_VM_FB_LOCATION_BASE (set by vbios) and calculate vram_start |
| 256 | * of this local device by adding an offset inside the XGMI hive. |
| 257 | * If driver uses GART table for VMID0 FB access, driver finds a hole in |
| 258 | * VMID0's virtual address space to place the SYSVM aperture inside |
| 259 | * which the first part is vram and the second part is gart (covering |
| 260 | * system ram). |
| 261 | */ |
| 262 | u64 vram_start; |
| 263 | u64 vram_end; |
| 264 | /* FB region , it's same as local vram region in single GPU, in XGMI |
| 265 | * configuration, this region covers all GPUs in the same hive , |
| 266 | * each GPU in the hive has the same view of this FB region . |
| 267 | * GPU0's vram starts at offset (0 * segment size) , |
| 268 | * GPU1 starts at offset (1 * segment size), etc. |
| 269 | */ |
| 270 | u64 fb_start; |
| 271 | u64 fb_end; |
| 272 | unsigned vram_width; |
| 273 | u64 real_vram_size; |
| 274 | int vram_mtrr; |
| 275 | u64 mc_mask; |
| 276 | const struct firmware *fw; /* MC firmware */ |
| 277 | uint32_t fw_version; |
| 278 | struct amdgpu_irq_src vm_fault; |
| 279 | uint32_t vram_type; |
| 280 | uint8_t vram_vendor; |
| 281 | uint32_t srbm_soft_reset; |
| 282 | bool prt_warning; |
| 283 | uint32_t sdpif_register; |
| 284 | /* apertures */ |
| 285 | u64 shared_aperture_start; |
| 286 | u64 shared_aperture_end; |
| 287 | u64 private_aperture_start; |
| 288 | u64 private_aperture_end; |
| 289 | /* protects concurrent invalidation */ |
| 290 | spinlock_t invalidate_lock; |
| 291 | bool translate_further; |
| 292 | struct kfd_vm_fault_info *vm_fault_info; |
| 293 | atomic_t vm_fault_info_updated; |
| 294 | |
| 295 | struct amdgpu_gmc_fault fault_ring[AMDGPU_GMC_FAULT_RING_SIZE]; |
| 296 | struct { |
| 297 | uint64_t idx:AMDGPU_GMC_FAULT_RING_ORDER; |
| 298 | } fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE]; |
| 299 | uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER; |
| 300 | |
| 301 | bool tmz_enabled; |
| 302 | bool is_app_apu; |
| 303 | |
| 304 | struct amdgpu_mem_partition_info *mem_partitions; |
| 305 | uint8_t num_mem_partitions; |
| 306 | const struct amdgpu_gmc_funcs *gmc_funcs; |
| 307 | enum amdgpu_memory_partition requested_nps_mode; |
| 308 | uint32_t supported_nps_modes; |
| 309 | uint32_t reset_flags; |
| 310 | |
| 311 | struct amdgpu_xgmi xgmi; |
| 312 | struct amdgpu_irq_src ecc_irq; |
| 313 | int noretry; |
| 314 | uint32_t xnack_flags; |
| 315 | |
| 316 | uint32_t vmid0_page_table_block_size; |
| 317 | uint32_t vmid0_page_table_depth; |
| 318 | struct amdgpu_bo *pdb0_bo; |
| 319 | /* CPU kmapped address of pdb0*/ |
| 320 | void *ptr_pdb0; |
| 321 | |
| 322 | /* MALL size */ |
| 323 | u64 mall_size; |
| 324 | uint32_t m_half_use; |
| 325 | |
| 326 | /* number of UMC instances */ |
| 327 | int num_umc; |
| 328 | /* mode2 save restore */ |
| 329 | u64 VM_L2_CNTL; |
| 330 | u64 VM_L2_CNTL2; |
| 331 | u64 VM_DUMMY_PAGE_FAULT_CNTL; |
| 332 | u64 VM_DUMMY_PAGE_FAULT_ADDR_LO32; |
| 333 | u64 VM_DUMMY_PAGE_FAULT_ADDR_HI32; |
| 334 | u64 VM_L2_PROTECTION_FAULT_CNTL; |
| 335 | u64 VM_L2_PROTECTION_FAULT_CNTL2; |
| 336 | u64 VM_L2_PROTECTION_FAULT_MM_CNTL3; |
| 337 | u64 VM_L2_PROTECTION_FAULT_MM_CNTL4; |
| 338 | u64 VM_L2_PROTECTION_FAULT_ADDR_LO32; |
| 339 | u64 VM_L2_PROTECTION_FAULT_ADDR_HI32; |
| 340 | u64 VM_DEBUG; |
| 341 | u64 VM_L2_MM_GROUP_RT_CLASSES; |
| 342 | u64 VM_L2_BANK_SELECT_RESERVED_CID; |
| 343 | u64 VM_L2_BANK_SELECT_RESERVED_CID2; |
| 344 | u64 VM_L2_CACHE_PARITY_CNTL; |
| 345 | u64 VM_L2_IH_LOG_CNTL; |
| 346 | u64 VM_CONTEXT_CNTL[16]; |
| 347 | u64 VM_CONTEXT_PAGE_TABLE_BASE_ADDR_LO32[16]; |
| 348 | u64 VM_CONTEXT_PAGE_TABLE_BASE_ADDR_HI32[16]; |
| 349 | u64 VM_CONTEXT_PAGE_TABLE_START_ADDR_LO32[16]; |
| 350 | u64 VM_CONTEXT_PAGE_TABLE_START_ADDR_HI32[16]; |
| 351 | u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[16]; |
| 352 | u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[16]; |
| 353 | u64 MC_VM_MX_L1_TLB_CNTL; |
| 354 | |
| 355 | u64 noretry_flags; |
| 356 | |
| 357 | bool ; |
| 358 | bool ; |
| 359 | bool flush_pasid_uses_kiq; |
| 360 | }; |
| 361 | |
/* Convenience wrappers dispatching into the per-ASIC gmc_funcs table. */
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_vm_pte(adev, vm, bo, vm_flags, pte_flags) \
	((adev)->gmc.gmc_funcs->get_vm_pte((adev), (vm), (bo), (vm_flags), \
					   (pte_flags)))
#define amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, pte_flags)	\
	(adev)->gmc.gmc_funcs->override_vm_pte_flags			\
		((adev), (vm), (addr), (pte_flags))
#define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev))
/* statement expression evaluates `adev` only once */
#define amdgpu_gmc_get_dcc_alignment(adev) ({ \
	typeof(adev) _adev = (adev); \
	_adev->gmc.gmc_funcs->get_dcc_alignment(_adev); \
})
| 376 | |
| 377 | /** |
| 378 | * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR |
| 379 | * |
| 380 | * @adev: amdgpu_device pointer |
| 381 | * |
| 382 | * Returns: |
| 383 | * True if full VRAM is visible through the BAR |
| 384 | */ |
| 385 | static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc) |
| 386 | { |
| 387 | WARN_ON(gmc->real_vram_size < gmc->visible_vram_size); |
| 388 | |
| 389 | return (gmc->real_vram_size == gmc->visible_vram_size); |
| 390 | } |
| 391 | |
| 392 | /** |
| 393 | * amdgpu_gmc_sign_extend - sign extend the given gmc address |
| 394 | * |
| 395 | * @addr: address to extend |
| 396 | */ |
| 397 | static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr) |
| 398 | { |
| 399 | if (addr >= AMDGPU_GMC_HOLE_START) |
| 400 | addr |= AMDGPU_GMC_HOLE_END; |
| 401 | |
| 402 | return addr; |
| 403 | } |
| 404 | |
| 405 | bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev); |
| 406 | int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev); |
| 407 | void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, |
| 408 | uint64_t *addr, uint64_t *flags); |
| 409 | int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr, |
| 410 | uint32_t gpu_page_idx, uint64_t addr, |
| 411 | uint64_t flags); |
| 412 | uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo); |
| 413 | uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo); |
| 414 | void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc); |
| 415 | void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, |
| 416 | u64 base); |
| 417 | void amdgpu_gmc_gart_location(struct amdgpu_device *adev, |
| 418 | struct amdgpu_gmc *mc, |
| 419 | enum amdgpu_gart_placement gart_placement); |
| 420 | void amdgpu_gmc_agp_location(struct amdgpu_device *adev, |
| 421 | struct amdgpu_gmc *mc); |
| 422 | void amdgpu_gmc_set_agp_default(struct amdgpu_device *adev, |
| 423 | struct amdgpu_gmc *mc); |
| 424 | bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, |
| 425 | struct amdgpu_ih_ring *ih, uint64_t addr, |
| 426 | uint16_t pasid, uint64_t timestamp); |
| 427 | void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, |
| 428 | uint16_t pasid); |
| 429 | int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev); |
| 430 | int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev); |
| 431 | void amdgpu_gmc_ras_fini(struct amdgpu_device *adev); |
| 432 | int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev); |
| 433 | void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, |
| 434 | uint32_t vmhub, uint32_t flush_type); |
| 435 | int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, |
| 436 | uint32_t flush_type, bool all_hub, |
| 437 | uint32_t inst); |
| 438 | void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev, |
| 439 | uint32_t reg0, uint32_t reg1, |
| 440 | uint32_t ref, uint32_t mask, |
| 441 | uint32_t xcc_inst); |
| 442 | |
| 443 | extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev); |
| 444 | extern void amdgpu_gmc_noretry_set(struct amdgpu_device *adev); |
| 445 | |
| 446 | extern void |
| 447 | amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, |
| 448 | bool enable); |
| 449 | |
| 450 | void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev); |
| 451 | |
| 452 | void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev); |
| 453 | uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr); |
| 454 | uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); |
| 455 | int amdgpu_gmc_vram_checking(struct amdgpu_device *adev); |
| 456 | int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev); |
| 457 | void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev); |
| 458 | |
| 459 | int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, |
| 460 | struct amdgpu_mem_partition_info *mem_ranges, |
| 461 | uint8_t *exp_ranges); |
| 462 | |
| 463 | int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev, |
| 464 | int nps_mode); |
| 465 | void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev); |
| 466 | bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev); |
| 467 | enum amdgpu_memory_partition |
| 468 | amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev); |
| 469 | enum amdgpu_memory_partition |
| 470 | amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes); |
| 471 | enum amdgpu_memory_partition |
| 472 | amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev); |
| 473 | int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev); |
| 474 | void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev, |
| 475 | struct amdgpu_mem_partition_info *mem_ranges); |
| 476 | #endif |
| 477 | |