| 1 | /* |
| 2 | * Copyright 2016 Advanced Micro Devices, Inc. |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice shall be included in |
| 12 | * all copies or substantial portions of the Software. |
| 13 | * |
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 20 | * OTHER DEALINGS IN THE SOFTWARE. |
| 21 | * |
| 22 | * Author: Monk.liu@amd.com |
| 23 | */ |
| 24 | #ifndef AMDGPU_VIRT_H |
| 25 | #define AMDGPU_VIRT_H |
| 26 | |
| 27 | #include "amdgv_sriovmsg.h" |
| 28 | |
/* Capability bits stored in adev->virt.caps */
#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */
#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is pass through for VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */
#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */

/* flags for indirect register access path supported by rlcg for sriov */
#define AMDGPU_RLCG_GC_WRITE_LEGACY (0x8 << 28)
#define AMDGPU_RLCG_GC_WRITE (0x0 << 28)
#define AMDGPU_RLCG_GC_READ (0x1 << 28)
#define AMDGPU_RLCG_MMHUB_WRITE (0x2 << 28)

/* error code for indirect register access path supported by rlcg for sriov */
#define AMDGPU_RLCG_VFGATE_DISABLED 0x4000000
#define AMDGPU_RLCG_WRONG_OPERATION_TYPE 0x2000000
#define AMDGPU_RLCG_REG_NOT_IN_RANGE 0x1000000

/* masks to split the RLCG SCRATCH1 value into register address and error bits */
#define AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK 0xFFFFF
#define AMDGPU_RLCG_SCRATCH1_ERROR_MASK 0xF000000

/* all asic after AI use this offset */
#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
/* tonga/fiji use this offset */
#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503

/* max retries before giving up on the periodic vf2pf update */
#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 2

/* Signature used to validate the SR-IOV dynamic critical region init data header ("INDA") */
#define AMDGPU_SRIOV_CRIT_DATA_SIGNATURE "INDA"
#define AMDGPU_SRIOV_CRIT_DATA_SIG_LEN 4

/* true when table 'id' is marked valid in the critical-region header bitmap */
#define IS_SRIOV_CRIT_REGN_ENTRY_VALID(hdr, id) ((hdr)->valid_tables & (1 << (id)))
| 62 | |
/* Virtualization mode the driver detected for this device */
enum amdgpu_sriov_vf_mode {
	SRIOV_VF_MODE_BARE_METAL = 0,	/* not a VF at all */
	SRIOV_VF_MODE_ONE_VF,		/* single VF owns the whole GPU */
	SRIOV_VF_MODE_MULTI_VF,		/* GPU is time-shared between VFs */
};
| 68 | |
/* Multimedia table BO shared with the hypervisor (CPU and GPU views) */
struct amdgpu_mm_table {
	struct amdgpu_bo *bo;	/* backing buffer object */
	uint32_t *cpu_addr;	/* kernel CPU mapping of the table */
	uint64_t gpu_addr;	/* GPU VA of the table */
};
| 74 | |
#define AMDGPU_VF_ERROR_ENTRY_SIZE 16

/*
 * struct amdgpu_vf_error_buffer - amdgpu VF error information.
 *
 * Fixed-size ring of error records; read_count/write_count index into
 * the parallel code/flags/data arrays under @lock.
 */
struct amdgpu_vf_error_buffer {
	struct mutex lock;	/* protects the counters and arrays below */
	int read_count;
	int write_count;
	uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
	uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
	uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};
| 86 | |
enum idh_request;

/**
 * struct amdgpu_virt_ops - amdgpu device virt operations
 * @req_full_gpu: request exclusive (full-access) mode from the host
 * @rel_full_gpu: release exclusive mode back to the host
 * @req_init_data: ask the host to publish the init data exchange region
 * @reset_gpu: request a VF FLR / GPU reset from the host
 * @ready_to_reset: notify the host this VF is ready to be reset
 * @wait_reset: wait for a host-initiated reset to complete
 * @trans_msg: send a raw mailbox message with up to three data words
 * @ras_poison_handler: handle a RAS poison event for the given block
 * @rcvd_ras_intr: query whether a RAS interrupt was received from the host
 * @req_ras_err_count: request updated RAS error counts from the host
 * @req_ras_cper_dump: request a CPER dump starting at the VF read pointer
 * @req_bad_pages: request the host's bad-page list
 * @req_ras_chk_criti: ask the host whether @addr hits a critical region
 */
struct amdgpu_virt_ops {
	int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*req_init_data)(struct amdgpu_device *adev);
	int (*reset_gpu)(struct amdgpu_device *adev);
	void (*ready_to_reset)(struct amdgpu_device *adev);
	int (*wait_reset)(struct amdgpu_device *adev);
	void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
			  u32 data1, u32 data2, u32 data3);
	void (*ras_poison_handler)(struct amdgpu_device *adev,
				   enum amdgpu_ras_block block);
	bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
	int (*req_ras_err_count)(struct amdgpu_device *adev);
	int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
	int (*req_bad_pages)(struct amdgpu_device *adev);
	int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr);
};
| 109 | |
/*
 * Firmware Reserve Frame buffer
 *
 * CPU pointers into the firmware-reserved VRAM region that holds the
 * pf2vf/vf2pf exchange structures and RAS telemetry data.
 */
struct amdgpu_virt_fw_reserve {
	struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
	struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
	void *ras_telemetry;
	unsigned int checksum_key;	/* key used when validating exchange checksums */
};
| 119 | |
/*
 * Legacy GIM header
 *
 * Definition shared between PF and VF.
 * Structures forcibly aligned to 4 to keep the same style as PF.
 */
#define AMDGIM_DATAEXCHANGE_OFFSET (64 * 1024)

/*
 * Number of spare uint32_t slots left in a structure of 'total' dwords
 * after accounting for the given counts of u8/u16/u32/u64 members.
 */
#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
	(total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
| 130 | |
/* Feature bits advertised by GIM in the pf2vf feature_flags field */
enum AMDGIM_FEATURE_FLAG {
	/* GIM supports feature of Error log collecting */
	AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
	/* GIM supports feature of loading uCodes */
	AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
	/* VRAM LOST by GIM */
	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
	/* MM bandwidth */
	AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
	/* PP ONE VF MODE in GIM */
	AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
	/* Indirect Reg Access enabled */
	AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
	/* AV1 Support MODE */
	AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
	/* VCN RB decouple */
	AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7),
	/* MES info */
	AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8),
	/* host reports per-block RAS capabilities */
	AMDGIM_FEATURE_RAS_CAPS = (1 << 9),
	/* host supports RAS telemetry region */
	AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
	/* host supports RAS CPER dumps */
	AMDGIM_FEATURE_RAS_CPER = (1 << 11),
	/* XGMI TA extended peer-link info */
	AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK = (1 << 12),
};
| 155 | |
/* Per-IP indirect register access routes enabled by the host */
enum AMDGIM_REG_ACCESS_FLAG {
	/* Use PSP to program IH_RB_CNTL */
	AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
	/* Use RLC to program MMHUB regs */
	AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
	/* Use RLC to program GC regs */
	AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
	/* Use PSP to program L1_TLB_CNTL */
	AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3),
	/* Use RLCG to program SQ_CONFIG1 */
	AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG = (1 << 4),
};
| 168 | |
| 169 | struct amdgim_pf2vf_info_v1 { |
| 170 | /* header contains size and version */ |
| 171 | struct amd_sriov_msg_pf2vf_info_header ; |
| 172 | /* max_width * max_height */ |
| 173 | unsigned int uvd_enc_max_pixels_count; |
| 174 | /* 16x16 pixels/sec, codec independent */ |
| 175 | unsigned int uvd_enc_max_bandwidth; |
| 176 | /* max_width * max_height */ |
| 177 | unsigned int vce_enc_max_pixels_count; |
| 178 | /* 16x16 pixels/sec, codec independent */ |
| 179 | unsigned int vce_enc_max_bandwidth; |
| 180 | /* MEC FW position in kb from the start of visible frame buffer */ |
| 181 | unsigned int mecfw_kboffset; |
| 182 | /* The features flags of the GIM driver supports. */ |
| 183 | unsigned int feature_flags; |
| 184 | /* use private key from mailbox 2 to create chueksum */ |
| 185 | unsigned int checksum; |
| 186 | } __aligned(4); |
| 187 | |
| 188 | struct amdgim_vf2pf_info_v1 { |
| 189 | /* header contains size and version */ |
| 190 | struct amd_sriov_msg_vf2pf_info_header ; |
| 191 | /* driver version */ |
| 192 | char driver_version[64]; |
| 193 | /* driver certification, 1=WHQL, 0=None */ |
| 194 | unsigned int driver_cert; |
| 195 | /* guest OS type and version: need a define */ |
| 196 | unsigned int os_info; |
| 197 | /* in the unit of 1M */ |
| 198 | unsigned int fb_usage; |
| 199 | /* guest gfx engine usage percentage */ |
| 200 | unsigned int gfx_usage; |
| 201 | /* guest gfx engine health percentage */ |
| 202 | unsigned int gfx_health; |
| 203 | /* guest compute engine usage percentage */ |
| 204 | unsigned int compute_usage; |
| 205 | /* guest compute engine health percentage */ |
| 206 | unsigned int compute_health; |
| 207 | /* guest vce engine usage percentage. 0xffff means N/A. */ |
| 208 | unsigned int vce_enc_usage; |
| 209 | /* guest vce engine health percentage. 0xffff means N/A. */ |
| 210 | unsigned int vce_enc_health; |
| 211 | /* guest uvd engine usage percentage. 0xffff means N/A. */ |
| 212 | unsigned int uvd_enc_usage; |
| 213 | /* guest uvd engine usage percentage. 0xffff means N/A. */ |
| 214 | unsigned int uvd_enc_health; |
| 215 | unsigned int checksum; |
| 216 | } __aligned(4); |
| 217 | |
| 218 | struct amdgim_vf2pf_info_v2 { |
| 219 | /* header contains size and version */ |
| 220 | struct amd_sriov_msg_vf2pf_info_header ; |
| 221 | uint32_t checksum; |
| 222 | /* driver version */ |
| 223 | uint8_t driver_version[64]; |
| 224 | /* driver certification, 1=WHQL, 0=None */ |
| 225 | uint32_t driver_cert; |
| 226 | /* guest OS type and version: need a define */ |
| 227 | uint32_t os_info; |
| 228 | /* in the unit of 1M */ |
| 229 | uint32_t fb_usage; |
| 230 | /* guest gfx engine usage percentage */ |
| 231 | uint32_t gfx_usage; |
| 232 | /* guest gfx engine health percentage */ |
| 233 | uint32_t gfx_health; |
| 234 | /* guest compute engine usage percentage */ |
| 235 | uint32_t compute_usage; |
| 236 | /* guest compute engine health percentage */ |
| 237 | uint32_t compute_health; |
| 238 | /* guest vce engine usage percentage. 0xffff means N/A. */ |
| 239 | uint32_t vce_enc_usage; |
| 240 | /* guest vce engine health percentage. 0xffff means N/A. */ |
| 241 | uint32_t vce_enc_health; |
| 242 | /* guest uvd engine usage percentage. 0xffff means N/A. */ |
| 243 | uint32_t uvd_enc_usage; |
| 244 | /* guest uvd engine usage percentage. 0xffff means N/A. */ |
| 245 | uint32_t uvd_enc_health; |
| 246 | uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)]; |
| 247 | } __aligned(4); |
| 248 | |
/* Bookkeeping for RAS bad-page handling on a VF */
struct amdgpu_virt_ras_err_handler_data {
	/* point to bad page records array */
	struct eeprom_table_record *bps;
	/* point to reserved bo array */
	struct amdgpu_bo **bps_bo;
	/* the count of entries */
	int count;
	/* last reserved entry's index + 1 */
	int last_reserved;
};
| 259 | |
/* Per-device state for VF RAS host requests */
struct amdgpu_virt_ras {
	struct ratelimit_state ras_error_cnt_rs;	/* rate-limit error-count requests */
	struct ratelimit_state ras_cper_dump_rs;	/* rate-limit CPER dump requests */
	struct ratelimit_state ras_chk_criti_rs;	/* rate-limit critical-region checks */
	struct mutex ras_telemetry_mutex;		/* serializes telemetry access */
	uint64_t cper_rptr;				/* VF read pointer into the CPER ring */
};
| 267 | |
/* X-macro list of virt attribute capabilities; expanded by DECLARE_ATTR_CAP_CLASS */
#define AMDGPU_VIRT_CAPS_LIST(X) X(AMDGPU_VIRT_CAP_POWER_LIMIT)

DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST);
| 271 | |
/* A critical region described by the host: offset plus size in KB */
struct amdgpu_virt_region {
	uint32_t offset;
	uint32_t size_kb;
};
| 276 | |
| 277 | /* GPU virtualization */ |
| 278 | struct amdgpu_virt { |
| 279 | uint32_t caps; |
| 280 | struct amdgpu_bo *csa_obj; |
| 281 | void *csa_cpu_addr; |
| 282 | bool chained_ib_support; |
| 283 | uint32_t reg_val_offs; |
| 284 | struct amdgpu_irq_src ack_irq; |
| 285 | struct amdgpu_irq_src rcv_irq; |
| 286 | |
| 287 | struct work_struct flr_work; |
| 288 | struct work_struct req_bad_pages_work; |
| 289 | struct work_struct handle_bad_pages_work; |
| 290 | |
| 291 | struct amdgpu_mm_table mm_table; |
| 292 | const struct amdgpu_virt_ops *ops; |
| 293 | struct amdgpu_vf_error_buffer vf_errors; |
| 294 | struct amdgpu_virt_fw_reserve fw_reserve; |
| 295 | struct amdgpu_virt_caps virt_caps; |
| 296 | uint32_t gim_feature; |
| 297 | uint32_t reg_access_mode; |
| 298 | int req_init_data_ver; |
| 299 | bool tdr_debug; |
| 300 | struct amdgpu_virt_ras_err_handler_data *virt_eh_data; |
| 301 | bool ras_init_done; |
| 302 | uint32_t reg_access; |
| 303 | |
| 304 | /* dynamic(v2) critical regions */ |
| 305 | struct amdgpu_virt_region ; |
| 306 | struct amdgpu_virt_region crit_regn; |
| 307 | struct amdgpu_virt_region crit_regn_tbl[AMD_SRIOV_MSG_MAX_TABLE_ID]; |
| 308 | bool is_dynamic_crit_regn_enabled; |
| 309 | |
| 310 | /* vf2pf message */ |
| 311 | struct delayed_work vf2pf_work; |
| 312 | uint32_t vf2pf_update_interval_ms; |
| 313 | int vf2pf_update_retry_cnt; |
| 314 | |
| 315 | /* multimedia bandwidth config */ |
| 316 | bool is_mm_bw_enabled; |
| 317 | uint32_t decode_max_dimension_pixels; |
| 318 | uint32_t decode_max_frame_pixels; |
| 319 | uint32_t encode_max_dimension_pixels; |
| 320 | uint32_t encode_max_frame_pixels; |
| 321 | |
| 322 | /* the ucode id to signal the autoload */ |
| 323 | uint32_t autoload_ucode_id; |
| 324 | |
| 325 | /* Spinlock to protect access to the RLCG register interface */ |
| 326 | spinlock_t rlcg_reg_lock; |
| 327 | |
| 328 | struct mutex access_req_mutex; |
| 329 | |
| 330 | union amd_sriov_ras_caps ras_en_caps; |
| 331 | union amd_sriov_ras_caps ras_telemetry_en_caps; |
| 332 | struct amdgpu_virt_ras ras; |
| 333 | struct amd_sriov_ras_telemetry_error_count count_cache; |
| 334 | |
| 335 | /* hibernate and resume with different VF feature for xgmi enabled system */ |
| 336 | bool is_xgmi_node_migrate_enabled; |
| 337 | }; |
| 338 | |
struct amdgpu_video_codec_info;

/* Predicates over adev->virt.caps */
#define amdgpu_sriov_enabled(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)

#define amdgpu_sriov_vf(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)

#define amdgpu_sriov_bios(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)

#define amdgpu_sriov_runtime(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)

/* VF currently holds exclusive (full) access to the GPU */
#define amdgpu_sriov_fullaccess(adev) \
(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))

/* Predicates over the indirect register access feature/route bits */
#define amdgpu_sriov_reg_indirect_en(adev) \
(amdgpu_sriov_vf((adev)) && \
	((adev)->virt.gim_feature & (AMDGIM_FEATURE_INDIRECT_REG_ACCESS)))

#define amdgpu_sriov_reg_indirect_ih(adev) \
(amdgpu_sriov_vf((adev)) && \
	((adev)->virt.reg_access & (AMDGIM_FEATURE_IH_REG_PSP_EN)))

#define amdgpu_sriov_reg_indirect_mmhub(adev) \
(amdgpu_sriov_vf((adev)) && \
	((adev)->virt.reg_access & (AMDGIM_FEATURE_MMHUB_REG_RLC_EN)))

#define amdgpu_sriov_reg_indirect_gc(adev) \
(amdgpu_sriov_vf((adev)) && \
	((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))

#define amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev) \
(amdgpu_sriov_vf((adev)) && \
	((adev)->virt.reg_access & (AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN)))

#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
	(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))

#define amdgpu_sriov_reg_access_sq_config(adev) \
(amdgpu_sriov_vf((adev)) && \
	((adev)->virt.reg_access & (AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG)))

#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)

#define amdgpu_sriov_vf_mmio_access_protection(adev) \
((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)

/* RAS feature predicates from the host-advertised gim_feature bits */
#define amdgpu_sriov_ras_caps_en(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CAPS)

#define amdgpu_sriov_ras_telemetry_en(adev) \
(((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_TELEMETRY) && (adev)->virt.fw_reserve.ras_telemetry)

#define amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk) \
(amdgpu_sriov_ras_telemetry_en((adev)) && (adev)->virt.ras_telemetry_en_caps.all & BIT(sriov_blk))

#define amdgpu_sriov_ras_cper_en(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CPER)

#define amdgpu_sriov_xgmi_ta_ext_peer_link_en(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK)
| 403 | |
/*
 * is_virtual_machine - best-effort check whether we run inside a VM.
 *
 * x86: relies on the CPUID hypervisor bit. arm64: assumes a guest kernel
 * does not run at the hypervisor exception level. Other architectures:
 * always reports false.
 */
static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#elif defined(CONFIG_ARM64)
	return !is_kernel_in_hyp_mode();
#else
	return false;
#endif
}
| 414 | |
/* Feature-query helpers over gim_feature / reset state */
#define amdgpu_sriov_is_pp_one_vf(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
#define amdgpu_sriov_multi_vf_mode(adev) \
	(amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
#define amdgpu_sriov_is_debug(adev) \
	((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
	((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
#define amdgpu_sriov_is_av1_support(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
#define amdgpu_sriov_is_vcn_rb_decouple(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
#define amdgpu_sriov_is_mes_info_enable(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE)

/* true when XGMI node migration is enabled and a segment size is configured */
#define amdgpu_virt_xgmi_migrate_enabled(adev) \
	((adev)->virt.is_xgmi_node_migrate_enabled && (adev)->gmc.xgmi.node_segment_size != 0)
| 432 | |
/* Core virt lifecycle: setup, full-access request/release, reset handshake */
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
void amdgpu_virt_ready_to_reset(struct amdgpu_device *adev);
int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
bool amdgpu_virt_rcvd_ras_interrupt(struct amdgpu_device *adev);
void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
/* pf2vf/vf2pf data exchange region management */
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_init(struct amdgpu_device *adev);

/* dynamic (v2) critical region discovery and data retrieval */
int amdgpu_virt_init_critical_region(struct amdgpu_device *adev);
int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
		int data_id, uint8_t *binary, u32 *size);

/* debugfs access gating while in SR-IOV runtime */
bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);

enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev);

void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
			struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
			struct amdgpu_video_codec_info *decode, uint32_t decode_array_size);
/* indirect (RLCG/PSP) register access entry points for VFs */
void amdgpu_sriov_wreg(struct amdgpu_device *adev,
		       u32 offset, u32 value,
		       u32 acc_flags, u32 hwip, u32 xcc_id);
u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
		      u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id);
bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev,
			uint32_t ucode_id);
void amdgpu_virt_pre_reset(struct amdgpu_device *adev);
void amdgpu_virt_post_reset(struct amdgpu_device *adev);
bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev);
bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
					  u32 acc_flags, u32 hwip,
					  bool write, u32 *rlcg_flag);
u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id);
/* RAS queries and host requests on behalf of the VF */
bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev);
int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
				  struct ras_err_data *err_data);
int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update);
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
					enum amdgpu_ras_block block);
void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit);
| 486 | #endif |
| 487 | |