| 1 | /* |
| 2 | * Copyright 2020 Advanced Micro Devices, Inc. |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice shall be included in |
| 12 | * all copies or substantial portions of the Software. |
| 13 | * |
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 20 | * OTHER DEALINGS IN THE SOFTWARE. |
| 21 | */ |
| 22 | |
| 23 | #ifndef __SMU_CMN_H__ |
| 24 | #define __SMU_CMN_H__ |
| 25 | |
| 26 | #include "amdgpu_smu.h" |
| 27 | |
| 28 | #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4) |
| 29 | |
/* Static fan-control (FDO = fan duty-cycle output) PWM modes */
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5

/*
 * IH (interrupt handler) source ID used for SMU-to-driver notifications;
 * the CONTEXT_ID values below identify the specific event being signalled.
 */
#define SMU_IH_INTERRUPT_ID_TO_DRIVER 0xFE
#define SMU_IH_INTERRUPT_CONTEXT_ID_BACO 0x2
#define SMU_IH_INTERRUPT_CONTEXT_ID_AC 0x3
#define SMU_IH_INTERRUPT_CONTEXT_ID_DC 0x4
#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
#define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9

/* Wildcard interface version: used to skip if_version compatibility checks */
#define SMU_IGNORE_IF_VERSION 0xFFFFFFFF
| 44 | |
/*
 * Initialize a struct gpu_metrics_v&lt;frev&gt;_&lt;crev&gt;: prefill the entire
 * structure with 0xFF bytes, then populate the common
 * metrics_table_header with the format/content revision and total size.
 * typecheck() enforces at compile time that @ptr really points to the
 * revision named by (frev, crev).
 */
#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
	do { \
		typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \
		struct gpu_metrics_v##frev##_##crev *tmp = (ptr); \
		struct metrics_table_header *header = \
			(struct metrics_table_header *)tmp; \
		memset(header, 0xFF, sizeof(*tmp)); \
		header->format_revision = frev; \
		header->content_revision = crev; \
		header->structure_size = sizeof(*tmp); \
	} while (0)
| 56 | |
/*
 * Same pattern as smu_cmn_init_soft_gpu_metrics(), but for
 * struct amdgpu_partition_metrics_v&lt;fr&gt;_&lt;cr&gt;: 0xFF-fill the whole
 * structure and set up the shared metrics_table_header.
 */
#define smu_cmn_init_partition_metrics(ptr, fr, cr) \
	do { \
		typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *, \
			  (ptr)); \
		struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \
		struct metrics_table_header *header = \
			(struct metrics_table_header *)tmp; \
		memset(header, 0xFF, sizeof(*tmp)); \
		header->format_revision = fr; \
		header->content_revision = cr; \
		header->structure_size = sizeof(*tmp); \
	} while (0)
| 69 | |
/*
 * Same pattern as smu_cmn_init_soft_gpu_metrics(), but for
 * struct amdgpu_baseboard_temp_metrics_v&lt;fr&gt;_&lt;cr&gt;: 0xFF-fill the whole
 * structure and set up the shared metrics_table_header.
 */
#define smu_cmn_init_baseboard_temp_metrics(ptr, fr, cr) \
	do { \
		typecheck(struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *, \
			  (ptr)); \
		struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
		struct metrics_table_header *header = \
			(struct metrics_table_header *)tmp; \
		memset(header, 0xFF, sizeof(*tmp)); \
		header->format_revision = fr; \
		header->content_revision = cr; \
		header->structure_size = sizeof(*tmp); \
	} while (0)
| 82 | |
/*
 * Same pattern as smu_cmn_init_soft_gpu_metrics(), but for
 * struct amdgpu_gpuboard_temp_metrics_v&lt;fr&gt;_&lt;cr&gt;: 0xFF-fill the whole
 * structure and set up the shared metrics_table_header.
 */
#define smu_cmn_init_gpuboard_temp_metrics(ptr, fr, cr) \
	do { \
		typecheck(struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *, \
			  (ptr)); \
		struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
		struct metrics_table_header *header = \
			(struct metrics_table_header *)tmp; \
		memset(header, 0xFF, sizeof(*tmp)); \
		header->format_revision = fr; \
		header->content_revision = cr; \
		header->structure_size = sizeof(*tmp); \
	} while (0)
| 95 | |
| 96 | extern const int link_speed[]; |
| 97 | |
| 98 | /* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */ |
| 99 | static inline int pcie_gen_to_speed(uint32_t gen) |
| 100 | { |
| 101 | return ((gen == 0) ? link_speed[0] : link_speed[gen - 1]); |
| 102 | } |
| 103 | |
| 104 | int smu_cmn_send_msg_without_waiting(struct smu_context *smu, |
| 105 | uint16_t msg_index, |
| 106 | uint32_t param); |
| 107 | int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, |
| 108 | enum smu_message_type msg, |
| 109 | uint32_t param, |
| 110 | uint32_t *read_arg); |
| 111 | |
| 112 | int smu_cmn_send_smc_msg(struct smu_context *smu, |
| 113 | enum smu_message_type msg, |
| 114 | uint32_t *read_arg); |
| 115 | |
| 116 | int smu_cmn_send_debug_smc_msg(struct smu_context *smu, |
| 117 | uint32_t msg); |
| 118 | |
| 119 | int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu, |
| 120 | uint32_t msg, uint32_t param); |
| 121 | |
| 122 | int smu_cmn_wait_for_response(struct smu_context *smu); |
| 123 | |
| 124 | int smu_cmn_to_asic_specific_index(struct smu_context *smu, |
| 125 | enum smu_cmn2asic_mapping_type type, |
| 126 | uint32_t index); |
| 127 | |
| 128 | int smu_cmn_feature_is_supported(struct smu_context *smu, |
| 129 | enum smu_feature_mask mask); |
| 130 | |
| 131 | int smu_cmn_feature_is_enabled(struct smu_context *smu, |
| 132 | enum smu_feature_mask mask); |
| 133 | |
| 134 | bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu, |
| 135 | enum smu_clk_type clk_type); |
| 136 | |
| 137 | int smu_cmn_get_enabled_mask(struct smu_context *smu, |
| 138 | uint64_t *feature_mask); |
| 139 | |
| 140 | uint64_t smu_cmn_get_indep_throttler_status( |
| 141 | const unsigned long dep_status, |
| 142 | const uint8_t *throttler_map); |
| 143 | |
| 144 | int smu_cmn_feature_update_enable_state(struct smu_context *smu, |
| 145 | uint64_t feature_mask, |
| 146 | bool enabled); |
| 147 | |
| 148 | int smu_cmn_feature_set_enabled(struct smu_context *smu, |
| 149 | enum smu_feature_mask mask, |
| 150 | bool enable); |
| 151 | |
| 152 | size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu, |
| 153 | char *buf); |
| 154 | |
| 155 | int smu_cmn_set_pp_feature_mask(struct smu_context *smu, |
| 156 | uint64_t new_mask); |
| 157 | |
| 158 | int smu_cmn_disable_all_features_with_exception(struct smu_context *smu, |
| 159 | enum smu_feature_mask mask); |
| 160 | |
| 161 | int smu_cmn_get_smc_version(struct smu_context *smu, |
| 162 | uint32_t *if_version, |
| 163 | uint32_t *smu_version); |
| 164 | |
| 165 | int smu_cmn_update_table(struct smu_context *smu, |
| 166 | enum smu_table_id table_index, |
| 167 | int argument, |
| 168 | void *table_data, |
| 169 | bool drv2smu); |
| 170 | |
| 171 | int smu_cmn_write_watermarks_table(struct smu_context *smu); |
| 172 | |
| 173 | int smu_cmn_write_pptable(struct smu_context *smu); |
| 174 | |
| 175 | int smu_cmn_get_metrics_table(struct smu_context *smu, |
| 176 | void *metrics_table, |
| 177 | bool bypass_cache); |
| 178 | |
| 179 | int smu_cmn_get_combo_pptable(struct smu_context *smu); |
| 180 | |
| 181 | int smu_cmn_set_mp1_state(struct smu_context *smu, |
| 182 | enum pp_mp1_state mp1_state); |
| 183 | |
/*
 * Helper to make sysfs_emit_at() happy: rewind *buf to the start of its
 * page and store the distance rewound in *offset.  Bails out quietly if
 * either the buffer or the offset pointer is missing.
 */
static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
{
	int off;

	if (!offset || !*buf)
		return;

	off = offset_in_page(*buf);
	*buf -= off;
	*offset = off;
}
| 196 | |
| 197 | bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev); |
| 198 | void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy); |
| 199 | void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy); |
| 200 | |
| 201 | void smu_cmn_get_backend_workload_mask(struct smu_context *smu, |
| 202 | u32 workload_mask, |
| 203 | u32 *backend_workload_mask); |
| 204 | |
| 205 | /*SMU gpu metrics */ |
| 206 | |
/* Shorthand: expand X to the full AMDGPU_METRICS_ATTR_ID_* attribute ID */
#define SMU_MATTR(X) AMDGPU_METRICS_ATTR_ID_##X
/* Shorthand: expand X to the full AMDGPU_METRICS_TYPE_* type ID */
#define SMU_MTYPE(X) AMDGPU_METRICS_TYPE_##X
/* Shorthand: expand X to the full AMDGPU_METRICS_UNIT_* unit ID */
#define SMU_MUNIT(X) AMDGPU_METRICS_UNIT_##X

/*
 * Map an AMDGPU_METRICS_TYPE_* ID to the corresponding C type via the
 * SMU_CTYPE_* lookup table below (token-pasting dispatch).
 */
#define SMU_CTYPE(TYPEID) SMU_CTYPE_##TYPEID

#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U8 u8
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S8 s8
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U16 u16
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S16 s16
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U32 u32
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S32 s32
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U64 u64
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S64 s64
| 225 | |
/*
 * X-macro struct-member generators: each metrics field is laid out as a
 * u64 descriptor word (&lt;NAME&gt;_ftype) immediately followed by the value
 * itself, whose C type is derived from TYPEID via SMU_CTYPE().
 */
#define SMU_METRICS_SCALAR(ID, UNIT, TYPEID, NAME) \
	u64 NAME##_ftype; \
	SMU_CTYPE(TYPEID) NAME

#define SMU_METRICS_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \
	u64 NAME##_ftype; \
	SMU_CTYPE(TYPEID) NAME[SIZE]
| 234 | |
/*
 * X-macro field initializers.  These expect `obj` (pointer to the
 * metrics struct) and `count` (running attribute counter) to exist in
 * the expansion scope — see DECLARE_SMU_METRICS_CLASS() below, which is
 * the intended user.  Each field gets its encoded descriptor written to
 * &lt;NAME&gt;_ftype, its value set to all-0xFF bits, and bumps `count`.
 */
#define SMU_METRICS_INIT_SCALAR(ID, UNIT, TYPEID, NAME) \
	do { \
		obj->NAME##_ftype = \
			AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, 1); \
		obj->NAME = (SMU_CTYPE(TYPEID)) ~0; \
		count++; \
	} while (0)

#define SMU_METRICS_INIT_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \
	do { \
		obj->NAME##_ftype = \
			AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, SIZE); \
		memset(obj->NAME, 0xFF, sizeof(obj->NAME)); \
		count++; \
	} while (0)
| 251 | |
/*
 * Declare a packed metrics struct named CLASSNAME plus a matching
 * static-inline CLASSNAME##_init() helper.
 *
 * SMU_METRICS_FIELD_LIST must be an X-macro of the form
 * LIST(SCALAR, ARRAY): it is expanded once with the struct-member
 * generators (to lay out the fields after the common header and
 * attr_count), and again inside the init helper with the field
 * initializers (to 0xFF-poison and describe every field while counting
 * the attributes into attr_count).
 */
#define DECLARE_SMU_METRICS_CLASS(CLASSNAME, SMU_METRICS_FIELD_LIST) \
	struct __packed CLASSNAME { \
		struct metrics_table_header header; \
		int attr_count; \
		SMU_METRICS_FIELD_LIST(SMU_METRICS_SCALAR, SMU_METRICS_ARRAY); \
	}; \
	static inline void CLASSNAME##_init(struct CLASSNAME *obj, \
					    uint8_t frev, uint8_t crev) \
	{ \
		int count = 0; \
		memset(obj, 0xFF, sizeof(*obj)); \
		obj->header.format_revision = frev; \
		obj->header.content_revision = crev; \
		obj->header.structure_size = sizeof(*obj); \
		SMU_METRICS_FIELD_LIST(SMU_METRICS_INIT_SCALAR, \
				       SMU_METRICS_INIT_ARRAY) \
		obj->attr_count = count; \
	} while (0)
| 271 | |
| 272 | #endif |
| 273 | #endif |
| 274 | |