| 1 | /* |
| 2 | * Copyright 2015 Advanced Micro Devices, Inc. |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice shall be included in |
| 12 | * all copies or substantial portions of the Software. |
| 13 | * |
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 20 | * OTHER DEALINGS IN THE SOFTWARE. |
| 21 | * |
| 22 | */ |
| 23 | #include "pp_debug.h" |
| 24 | #include <linux/delay.h> |
| 25 | #include <linux/module.h> |
| 26 | #include <linux/pci.h> |
| 27 | #include <linux/slab.h> |
| 28 | #include <asm/div64.h> |
| 29 | #if IS_ENABLED(CONFIG_X86_64) |
| 30 | #include <asm/intel-family.h> |
| 31 | #endif |
| 32 | #include <drm/amdgpu_drm.h> |
| 33 | #include "ppatomctrl.h" |
| 34 | #include "atombios.h" |
| 35 | #include "pptable_v1_0.h" |
| 36 | #include "pppcielanes.h" |
| 37 | #include "amd_pcie_helpers.h" |
| 38 | #include "hardwaremanager.h" |
| 39 | #include "process_pptables_v1_0.h" |
| 40 | #include "cgs_common.h" |
| 41 | |
| 42 | #include "smu7_common.h" |
| 43 | |
| 44 | #include "hwmgr.h" |
| 45 | #include "smu7_hwmgr.h" |
| 46 | #include "smu_ucode_xfer_vi.h" |
| 47 | #include "smu7_powertune.h" |
| 48 | #include "smu7_dyn_defaults.h" |
| 49 | #include "smu7_thermal.h" |
| 50 | #include "smu7_clockpowergating.h" |
| 51 | #include "processpptables.h" |
| 52 | #include "pp_thermal.h" |
| 53 | #include "smu7_baco.h" |
| 54 | #include "smu7_smumgr.h" |
| 55 | #include "polaris10_smumgr.h" |
| 56 | |
| 57 | #include "ivsrcid/ivsrcid_vislands30.h" |
| 58 | |
| 59 | #define MC_CG_ARB_FREQ_F0 0x0a |
| 60 | #define MC_CG_ARB_FREQ_F1 0x0b |
| 61 | #define MC_CG_ARB_FREQ_F2 0x0c |
| 62 | #define MC_CG_ARB_FREQ_F3 0x0d |
| 63 | |
| 64 | #define MC_CG_SEQ_DRAMCONF_S0 0x05 |
| 65 | #define MC_CG_SEQ_DRAMCONF_S1 0x06 |
| 66 | #define MC_CG_SEQ_YCLK_SUSPEND 0x04 |
| 67 | #define MC_CG_SEQ_YCLK_RESUME 0x0a |
| 68 | |
| 69 | #define SMC_CG_IND_START 0xc0030000 |
| 70 | #define SMC_CG_IND_END 0xc0040000 |
| 71 | |
| 72 | #define MEM_FREQ_LOW_LATENCY 25000 |
| 73 | #define MEM_FREQ_HIGH_LATENCY 80000 |
| 74 | |
| 75 | #define MEM_LATENCY_HIGH 45 |
| 76 | #define MEM_LATENCY_LOW 35 |
| 77 | #define MEM_LATENCY_ERR 0xFFFF |
| 78 | |
| 79 | #define MC_SEQ_MISC0_GDDR5_SHIFT 28 |
| 80 | #define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 |
| 81 | #define MC_SEQ_MISC0_GDDR5_VALUE 5 |
| 82 | |
| 83 | #define PCIE_BUS_CLK 10000 |
| 84 | #define TCLK (PCIE_BUS_CLK / 10) |
| 85 | |
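|  | /* |
|  | * Workload profile presets, indexed by PP_SMC_POWER_PROFILE_* mode. |
|  | * Each row is assumed to map to the struct profile_mode_setting fields: |
|  | * {bupdate_sclk, sclk_up_hyst, sclk_down_hyst, sclk_activity, |
|  | *  bupdate_mclk, mclk_up_hyst, mclk_down_hyst, mclk_activity}. |
|  | */ |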
| 86 | static struct profile_mode_setting smu7_profiling[7] = { |
| 87 | {0, 0, 0, 0, 0, 0, 0, 0}, |
| 88 | {1, 0, 100, 30, 1, 0, 100, 10}, |
| 89 | {1, 10, 0, 30, 0, 0, 0, 0}, |
| 90 | {0, 0, 0, 0, 1, 10, 16, 31}, |
| 91 | {1, 0, 11, 50, 1, 0, 100, 10}, |
| 92 | {1, 0, 5, 30, 0, 0, 0, 0}, |
| 93 | {0, 0, 0, 0, 0, 0, 0, 0}, |
| 94 | }; |
| 95 | |
| 96 | #define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310) |
| 97 | |
| 98 | #define ixPWR_SVI2_PLANE1_LOAD 0xC0200280 |
| 99 | #define PWR_SVI2_PLANE1_LOAD__PSI1_MASK 0x00000020L |
| 100 | #define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK 0x00000040L |
| 101 | #define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT 0x00000005 |
| 102 | #define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT 0x00000006 |
| 103 | |
| 104 | #define STRAP_EVV_REVISION_MSB 2211 |
| 105 | #define STRAP_EVV_REVISION_LSB 2208 |
| 106 | |
| 107 | /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */ |
| 108 | enum DPM_EVENT_SRC { |
| 109 | DPM_EVENT_SRC_ANALOG = 0, |
| 110 | DPM_EVENT_SRC_EXTERNAL = 1, |
| 111 | DPM_EVENT_SRC_DIGITAL = 2, |
| 112 | DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, |
| 113 | DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 |
| 114 | }; |
| 115 | |
| 116 | #define ixDIDT_SQ_EDC_CTRL 0x0013 |
| 117 | #define ixDIDT_SQ_EDC_THRESHOLD 0x0014 |
| 118 | #define ixDIDT_SQ_EDC_STALL_PATTERN_1_2 0x0015 |
| 119 | #define ixDIDT_SQ_EDC_STALL_PATTERN_3_4 0x0016 |
| 120 | #define ixDIDT_SQ_EDC_STALL_PATTERN_5_6 0x0017 |
| 121 | #define ixDIDT_SQ_EDC_STALL_PATTERN_7 0x0018 |
| 122 | |
| 123 | #define ixDIDT_TD_EDC_CTRL 0x0053 |
| 124 | #define ixDIDT_TD_EDC_THRESHOLD 0x0054 |
| 125 | #define ixDIDT_TD_EDC_STALL_PATTERN_1_2 0x0055 |
| 126 | #define ixDIDT_TD_EDC_STALL_PATTERN_3_4 0x0056 |
| 127 | #define ixDIDT_TD_EDC_STALL_PATTERN_5_6 0x0057 |
| 128 | #define ixDIDT_TD_EDC_STALL_PATTERN_7 0x0058 |
| 129 | |
| 130 | #define ixDIDT_TCP_EDC_CTRL 0x0073 |
| 131 | #define ixDIDT_TCP_EDC_THRESHOLD 0x0074 |
| 132 | #define ixDIDT_TCP_EDC_STALL_PATTERN_1_2 0x0075 |
| 133 | #define ixDIDT_TCP_EDC_STALL_PATTERN_3_4 0x0076 |
| 134 | #define ixDIDT_TCP_EDC_STALL_PATTERN_5_6 0x0077 |
| 135 | #define ixDIDT_TCP_EDC_STALL_PATTERN_7 0x0078 |
| 136 | |
| 137 | #define ixDIDT_DB_EDC_CTRL 0x0033 |
| 138 | #define ixDIDT_DB_EDC_THRESHOLD 0x0034 |
| 139 | #define ixDIDT_DB_EDC_STALL_PATTERN_1_2 0x0035 |
| 140 | #define ixDIDT_DB_EDC_STALL_PATTERN_3_4 0x0036 |
| 141 | #define ixDIDT_DB_EDC_STALL_PATTERN_5_6 0x0037 |
| 142 | #define ixDIDT_DB_EDC_STALL_PATTERN_7 0x0038 |
| 143 | |
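|  | /* |
|  | * Indirect (ix*) register offsets for the per-block (SQ/TD/TCP/DB) DIDT |
|  | * EDC programming sequence; the list is terminated by 0xFFFFFFFF. |
|  | */ |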
| 144 | uint32_t DIDTEDCConfig_P12[] = { |
| 145 | ixDIDT_SQ_EDC_STALL_PATTERN_1_2, |
| 146 | ixDIDT_SQ_EDC_STALL_PATTERN_3_4, |
| 147 | ixDIDT_SQ_EDC_STALL_PATTERN_5_6, |
| 148 | ixDIDT_SQ_EDC_STALL_PATTERN_7, |
| 149 | ixDIDT_SQ_EDC_THRESHOLD, |
| 150 | ixDIDT_SQ_EDC_CTRL, |
| 151 | ixDIDT_TD_EDC_STALL_PATTERN_1_2, |
| 152 | ixDIDT_TD_EDC_STALL_PATTERN_3_4, |
| 153 | ixDIDT_TD_EDC_STALL_PATTERN_5_6, |
| 154 | ixDIDT_TD_EDC_STALL_PATTERN_7, |
| 155 | ixDIDT_TD_EDC_THRESHOLD, |
| 156 | ixDIDT_TD_EDC_CTRL, |
| 157 | ixDIDT_TCP_EDC_STALL_PATTERN_1_2, |
| 158 | ixDIDT_TCP_EDC_STALL_PATTERN_3_4, |
| 159 | ixDIDT_TCP_EDC_STALL_PATTERN_5_6, |
| 160 | ixDIDT_TCP_EDC_STALL_PATTERN_7, |
| 161 | ixDIDT_TCP_EDC_THRESHOLD, |
| 162 | ixDIDT_TCP_EDC_CTRL, |
| 163 | ixDIDT_DB_EDC_STALL_PATTERN_1_2, |
| 164 | ixDIDT_DB_EDC_STALL_PATTERN_3_4, |
| 165 | ixDIDT_DB_EDC_STALL_PATTERN_5_6, |
| 166 | ixDIDT_DB_EDC_STALL_PATTERN_7, |
| 167 | ixDIDT_DB_EDC_THRESHOLD, |
| 168 | ixDIDT_DB_EDC_CTRL, |
| 169 | 0xFFFFFFFF // End of list |
| 170 | }; |
| 171 | |
| 172 | static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic); |
| 173 | static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, |
| 174 | enum pp_clock_type type, uint32_t mask); |
| 175 | static int smu7_notify_has_display(struct pp_hwmgr *hwmgr); |
| 176 | |
| 177 | static struct smu7_power_state *cast_phw_smu7_power_state( |
| 178 | struct pp_hw_power_state *hw_ps) |
| 179 | { |
| 180 | PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), |
| 181 | "Invalid Powerstate Type!" , |
| 182 | return NULL); |
| 183 | |
| 184 | return (struct smu7_power_state *)hw_ps; |
| 185 | } |
| 186 | |
| 187 | static const struct smu7_power_state *cast_const_phw_smu7_power_state( |
| 188 | const struct pp_hw_power_state *hw_ps) |
| 189 | { |
| 190 | PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), |
| 191 | "Invalid Powerstate Type!" , |
| 192 | return NULL); |
| 193 | |
| 194 | return (const struct smu7_power_state *)hw_ps; |
| 195 | } |
| 196 | |
| 197 | /** |
| 198 | * smu7_get_mc_microcode_version - Find the MC microcode version and store it in the HwMgr struct |
| 199 | * |
| 200 | * @hwmgr: the address of the powerplay hardware manager. |
| 201 | * Return: always 0 |
| 202 | */ |
| 203 | static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr) |
| 204 | { |
| 205 | cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); |
| 206 | |
| 207 | hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); |
| 208 | |
| 209 | return 0; |
| 210 | } |
| 211 | |
| 212 | static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr) |
| 213 | { |
| 214 | uint32_t speedCntl = 0; |
| 215 | |
| 216 | /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ |
| 217 | speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE, |
| 218 | ixPCIE_LC_SPEED_CNTL); |
| 219 | return((uint16_t)PHM_GET_FIELD(speedCntl, |
| 220 | PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); |
| 221 | } |
| 222 | |
| 223 | static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) |
| 224 | { |
| 225 | uint32_t link_width; |
| 226 | |
| 227 | /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ |
| 228 | link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, |
| 229 | PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD); |
| 230 | |
| 231 | PP_ASSERT_WITH_CODE((7 >= link_width), |
| 232 | "Invalid PCIe lane width!" , return 0); |
| 233 | |
| 234 | return decode_pcie_lane_width(num_lanes: link_width); |
| 235 | } |
| 236 | |
| 237 | /** |
| 238 | * smu7_enable_smc_voltage_controller - Enable voltage control |
| 239 | * |
| 240 | * @hwmgr: the address of the powerplay hardware manager. |
| 241 | * Return: always 0 |
| 242 | */ |
| 243 | static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) |
| 244 | { |
| 245 | if (hwmgr->chip_id >= CHIP_POLARIS10 && |
| 246 | hwmgr->chip_id <= CHIP_VEGAM) { |
| 247 | PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, |
| 248 | CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0); |
| 249 | PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, |
| 250 | CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0); |
| 251 | } |
| 252 | |
| 253 | if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) |
| 254 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL); |
| 255 | |
| 256 | return 0; |
| 257 | } |
| 258 | |
| 259 | /** |
| 260 | * smu7_voltage_control - Checks if we want to support voltage control |
| 261 | * |
| 262 | * @hwmgr: the address of the powerplay hardware manager. |
| 263 | */ |
| 264 | static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr) |
| 265 | { |
| 266 | const struct smu7_hwmgr *data = |
| 267 | (const struct smu7_hwmgr *)(hwmgr->backend); |
| 268 | |
| 269 | return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control); |
| 270 | } |
| 271 | |
| 272 | /** |
| 273 | * smu7_enable_voltage_control - Enable voltage control |
| 274 | * |
| 275 | * @hwmgr: the address of the powerplay hardware manager. |
| 276 | * Return: always 0 |
| 277 | */ |
| 278 | static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr) |
| 279 | { |
| 280 | /* enable voltage control */ |
| 281 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 282 | GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); |
| 283 | |
| 284 | return 0; |
| 285 | } |
| 286 | |
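|  | /* |
|  | * Build an SVI2 voltage table from a v0 clock/voltage dependency table: |
|  | * one entry per dependency entry, carrying only the voltage value |
|  | * (SMIO mask and phase delay are cleared). |
|  | */ |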
| 287 | static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table, |
| 288 | struct phm_clock_voltage_dependency_table *voltage_dependency_table |
| 289 | ) |
| 290 | { |
| 291 | uint32_t i; |
| 292 | |
| 293 | PP_ASSERT_WITH_CODE((NULL != voltage_table), |
| 294 | "Voltage Dependency Table empty." , return -EINVAL;); |
| 295 | |
| 296 | voltage_table->mask_low = 0; |
| 297 | voltage_table->phase_delay = 0; |
| 298 | voltage_table->count = voltage_dependency_table->count; |
| 299 | |
| 300 | for (i = 0; i < voltage_dependency_table->count; i++) { |
| 301 | voltage_table->entries[i].value = |
| 302 | voltage_dependency_table->entries[i].v; |
| 303 | voltage_table->entries[i].smio_low = 0; |
| 304 | } |
| 305 | |
| 306 | return 0; |
| 307 | } |
| 308 | |
| 309 | |
| 310 | /** |
| 311 | * smu7_construct_voltage_tables - Create Voltage Tables. |
| 312 | * |
| 313 | * @hwmgr: the address of the powerplay hardware manager. |
| 314 | * Return: 0 on success; the error from voltage table retrieval otherwise. |
| 315 | */ |
| 316 | static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr) |
| 317 | { |
| 318 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 319 | struct phm_ppt_v1_information *table_info = |
| 320 | (struct phm_ppt_v1_information *)hwmgr->pptable; |
| 321 | int result = 0; |
| 322 | uint32_t tmp; |
| 323 | |
| 324 | if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { |
| 325 | result = atomctrl_get_voltage_table_v3(hwmgr, |
| 326 | VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, |
| 327 | &(data->mvdd_voltage_table)); |
| 328 | PP_ASSERT_WITH_CODE((0 == result), |
| 329 | "Failed to retrieve MVDD table." , |
| 330 | return result); |
| 331 | } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { |
| 332 | if (hwmgr->pp_table_version == PP_TABLE_V1) |
| 333 | result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table), |
| 334 | table_info->vdd_dep_on_mclk); |
| 335 | else if (hwmgr->pp_table_version == PP_TABLE_V0) |
| 336 | result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table), |
| 337 | hwmgr->dyn_state.mvdd_dependency_on_mclk); |
| 338 | |
| 339 | PP_ASSERT_WITH_CODE((0 == result), |
| 340 | "Failed to retrieve SVI2 MVDD table from dependency table." , |
| 341 | return result;); |
| 342 | } |
| 343 | |
| 344 | if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { |
| 345 | result = atomctrl_get_voltage_table_v3(hwmgr, |
| 346 | VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, |
| 347 | &(data->vddci_voltage_table)); |
| 348 | PP_ASSERT_WITH_CODE((0 == result), |
| 349 | "Failed to retrieve VDDCI table." , |
| 350 | return result); |
| 351 | } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { |
| 352 | if (hwmgr->pp_table_version == PP_TABLE_V1) |
| 353 | result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table), |
| 354 | table_info->vdd_dep_on_mclk); |
| 355 | else if (hwmgr->pp_table_version == PP_TABLE_V0) |
| 356 | result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table), |
| 357 | hwmgr->dyn_state.vddci_dependency_on_mclk); |
| 358 | PP_ASSERT_WITH_CODE((0 == result), |
| 359 | "Failed to retrieve SVI2 VDDCI table from dependency table." , |
| 360 | return result); |
| 361 | } |
| 362 | |
| 363 | if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { |
| 364 | /* VDDGFX has only SVI2 voltage control */ |
| 365 | result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table), |
| 366 | table_info->vddgfx_lookup_table); |
| 367 | PP_ASSERT_WITH_CODE((0 == result), |
| 368 | "Failed to retrieve SVI2 VDDGFX table from lookup table." , return result;); |
| 369 | } |
| 370 | |
| 371 | |
| 372 | if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) { |
| 373 | result = atomctrl_get_voltage_table_v3(hwmgr, |
| 374 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT, |
| 375 | &data->vddc_voltage_table); |
| 376 | PP_ASSERT_WITH_CODE((0 == result), |
| 377 | "Failed to retrieve VDDC table." , return result;); |
| 378 | } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { |
| 379 | |
| 380 | if (hwmgr->pp_table_version == PP_TABLE_V0) |
| 381 | result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table, |
| 382 | hwmgr->dyn_state.vddc_dependency_on_mclk); |
| 383 | else if (hwmgr->pp_table_version == PP_TABLE_V1) |
| 384 | result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table), |
| 385 | table_info->vddc_lookup_table); |
| 386 | |
| 387 | PP_ASSERT_WITH_CODE((0 == result), |
| 388 | "Failed to retrieve SVI2 VDDC table from dependency table." , return result;); |
| 389 | } |
| 390 | |
| 391 | tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC); |
| 392 | PP_ASSERT_WITH_CODE( |
| 393 | (data->vddc_voltage_table.count <= tmp), |
| 394 | "Too many voltage values for VDDC. Trimming to fit state table." , |
| 395 | phm_trim_voltage_table_to_fit_state_table(tmp, |
| 396 | &(data->vddc_voltage_table))); |
| 397 | |
| 398 | tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX); |
| 399 | PP_ASSERT_WITH_CODE( |
| 400 | (data->vddgfx_voltage_table.count <= tmp), |
| 401 | "Too many voltage values for VDDC. Trimming to fit state table." , |
| 402 | phm_trim_voltage_table_to_fit_state_table(tmp, |
| 403 | &(data->vddgfx_voltage_table))); |
| 404 | |
| 405 | tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI); |
| 406 | PP_ASSERT_WITH_CODE( |
| 407 | (data->vddci_voltage_table.count <= tmp), |
| 408 | "Too many voltage values for VDDCI. Trimming to fit state table." , |
| 409 | phm_trim_voltage_table_to_fit_state_table(tmp, |
| 410 | &(data->vddci_voltage_table))); |
| 411 | |
| 412 | tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD); |
| 413 | PP_ASSERT_WITH_CODE( |
| 414 | (data->mvdd_voltage_table.count <= tmp), |
| 415 | "Too many voltage values for MVDD. Trimming to fit state table." , |
| 416 | phm_trim_voltage_table_to_fit_state_table(tmp, |
| 417 | &(data->mvdd_voltage_table))); |
| 418 | |
| 419 | return 0; |
| 420 | } |
| 421 | |
| 422 | /** |
| 423 | * smu7_program_static_screen_threshold_parameters - Programs the static screen detection parameters |
| 424 | * |
| 425 | * @hwmgr: the address of the powerplay hardware manager. |
| 426 | * Return: always 0 |
| 427 | */ |
| 428 | static int smu7_program_static_screen_threshold_parameters( |
| 429 | struct pp_hwmgr *hwmgr) |
| 430 | { |
| 431 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 432 | |
| 433 | /* Set static screen threshold unit */ |
| 434 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 435 | CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, |
| 436 | data->static_screen_threshold_unit); |
| 437 | /* Set static screen threshold */ |
| 438 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 439 | CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, |
| 440 | data->static_screen_threshold); |
| 441 | |
| 442 | return 0; |
| 443 | } |
| 444 | |
| 445 | /** |
| 446 | * smu7_enable_display_gap - Setup display gap for glitch free memory clock switching. |
| 447 | * |
| 448 | * @hwmgr: the address of the powerplay hardware manager. |
| 449 | * Return: always 0 |
| 450 | */ |
| 451 | static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr) |
| 452 | { |
| 453 | uint32_t display_gap = |
| 454 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 455 | ixCG_DISPLAY_GAP_CNTL); |
| 456 | |
| 457 | display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, |
| 458 | DISP_GAP, DISPLAY_GAP_IGNORE); |
| 459 | |
| 460 | display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, |
| 461 | DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); |
| 462 | |
| 463 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 464 | ixCG_DISPLAY_GAP_CNTL, display_gap); |
| 465 | |
| 466 | return 0; |
| 467 | } |
| 468 | |
| 469 | /** |
| 470 | * smu7_program_voting_clients - Programs activity state transition voting clients |
| 471 | * |
| 472 | * @hwmgr: the address of the powerplay hardware manager. |
| 473 | * Return: always 0 |
| 474 | */ |
| 475 | static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr) |
| 476 | { |
| 477 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 478 | int i; |
| 479 | |
| 480 | /* Clear reset for voting clients before enabling DPM */ |
| 481 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 482 | SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); |
| 483 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 484 | SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); |
| 485 | |
| 486 | for (i = 0; i < 8; i++) |
| 487 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 488 | ixCG_FREQ_TRAN_VOTING_0 + i * 4, |
| 489 | data->voting_rights_clients[i]); |
| 490 | return 0; |
| 491 | } |
| 492 | |
| 493 | static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr) |
| 494 | { |
| 495 | int i; |
| 496 | |
| 497 | /* Reset voting clients before disabling DPM */ |
| 498 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 499 | SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); |
| 500 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 501 | SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); |
| 502 | |
| 503 | for (i = 0; i < 8; i++) |
| 504 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 505 | ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0); |
| 506 | |
| 507 | return 0; |
| 508 | } |
| 509 | |
| 510 | /* Copy one arb setting to another and then switch the active set. |
| 511 | * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants. |
| 512 | */ |
| 513 | static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, |
| 514 | uint32_t arb_src, uint32_t arb_dest) |
| 515 | { |
| 516 | uint32_t mc_arb_dram_timing; |
| 517 | uint32_t mc_arb_dram_timing2; |
| 518 | uint32_t burst_time; |
| 519 | uint32_t mc_cg_config; |
| 520 | |
| 521 | switch (arb_src) { |
| 522 | case MC_CG_ARB_FREQ_F0: |
| 523 | mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); |
| 524 | mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); |
| 525 | burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); |
| 526 | break; |
| 527 | case MC_CG_ARB_FREQ_F1: |
| 528 | mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); |
| 529 | mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); |
| 530 | burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); |
| 531 | break; |
| 532 | default: |
| 533 | return -EINVAL; |
| 534 | } |
| 535 | |
| 536 | switch (arb_dest) { |
| 537 | case MC_CG_ARB_FREQ_F0: |
| 538 | cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); |
| 539 | cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); |
| 540 | PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); |
| 541 | break; |
| 542 | case MC_CG_ARB_FREQ_F1: |
| 543 | cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); |
| 544 | cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); |
| 545 | PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); |
| 546 | break; |
| 547 | default: |
| 548 | return -EINVAL; |
| 549 | } |
| 550 | |
| 551 | mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); |
| 552 | mc_cg_config |= 0x0000000F; |
| 553 | cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); |
| 554 | PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest); |
| 555 | |
| 556 | return 0; |
| 557 | } |
| 558 | |
| 559 | static int smu7_reset_to_default(struct pp_hwmgr *hwmgr) |
| 560 | { |
| 561 | return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL); |
| 562 | } |
| 563 | |
| 564 | /** |
| 565 | * smu7_initial_switch_from_arbf0_to_f1 - Initial switch from ARB F0->F1 |
| 566 | * |
| 567 | * @hwmgr: the address of the powerplay hardware manager. |
| 568 | * This function is to be called from the SetPowerState table. |
| 569 | * Return: always 0 |
| 570 | */ |
| 571 | static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) |
| 572 | { |
| 573 | return smu7_copy_and_switch_arb_sets(hwmgr, |
| 574 | MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); |
| 575 | } |
| 576 | |
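|  | /* |
|  | * Read the MC arb set currently in use (SMC_SCRATCH9 bits [15:8]) and, |
|  | * if it is not F0, copy its timings back to F0 and make F0 active. |
|  | */ |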
| 577 | static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) |
| 578 | { |
| 579 | uint32_t tmp; |
| 580 | |
| 581 | tmp = (cgs_read_ind_register(hwmgr->device, |
| 582 | CGS_IND_REG__SMC, ixSMC_SCRATCH9) & |
| 583 | 0x0000ff00) >> 8; |
| 584 | |
| 585 | if (tmp == MC_CG_ARB_FREQ_F0) |
| 586 | return 0; |
| 587 | |
| 588 | return smu7_copy_and_switch_arb_sets(hwmgr, |
| 589 | tmp, MC_CG_ARB_FREQ_F0); |
| 590 | } |
| 591 | |
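|  | /* |
|  | * Pick the highest PCIe gen supported by both the platform and the ASIC |
|  | * (per pcie_gen_mask) and return it in the 0-based encoding used by the |
|  | * PCIe DPM table (0 = Gen1 ... 3 = Gen4). |
|  | */ |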
| 592 | static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr) |
| 593 | { |
| 594 | struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); |
| 595 | uint16_t pcie_gen = 0; |
| 596 | |
| 597 | if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 && |
| 598 | adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4) |
| 599 | pcie_gen = 3; |
| 600 | else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 && |
| 601 | adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3) |
| 602 | pcie_gen = 2; |
| 603 | else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 && |
| 604 | adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2) |
| 605 | pcie_gen = 1; |
| 606 | else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 && |
| 607 | adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1) |
| 608 | pcie_gen = 0; |
| 609 | |
| 610 | return pcie_gen; |
| 611 | } |
| 612 | |
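|  | /* Return the widest PCIe link width (in lanes) advertised in pcie_mlw_mask. */ |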
| 613 | static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr) |
| 614 | { |
| 615 | struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); |
| 616 | uint16_t pcie_width = 0; |
| 617 | |
| 618 | if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) |
| 619 | pcie_width = 16; |
| 620 | else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) |
| 621 | pcie_width = 12; |
| 622 | else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) |
| 623 | pcie_width = 8; |
| 624 | else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) |
| 625 | pcie_width = 4; |
| 626 | else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) |
| 627 | pcie_width = 2; |
| 628 | else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) |
| 629 | pcie_width = 1; |
| 630 | |
| 631 | return pcie_width; |
| 632 | } |
| 633 | |
| 634 | static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr) |
| 635 | { |
| 636 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 637 | |
| 638 | struct phm_ppt_v1_information *table_info = |
| 639 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 640 | struct phm_ppt_v1_pcie_table *pcie_table = NULL; |
| 641 | |
| 642 | uint32_t i, max_entry; |
| 643 | uint32_t tmp; |
| 644 | |
| 645 | PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels || |
| 646 | data->use_pcie_power_saving_levels), "No pcie performance levels!" , |
| 647 | return -EINVAL); |
| 648 | |
| 649 | if (table_info != NULL) |
| 650 | pcie_table = table_info->pcie_table; |
| 651 | |
| 652 | if (data->use_pcie_performance_levels && |
| 653 | !data->use_pcie_power_saving_levels) { |
| 654 | data->pcie_gen_power_saving = data->pcie_gen_performance; |
| 655 | data->pcie_lane_power_saving = data->pcie_lane_performance; |
| 656 | } else if (!data->use_pcie_performance_levels && |
| 657 | data->use_pcie_power_saving_levels) { |
| 658 | data->pcie_gen_performance = data->pcie_gen_power_saving; |
| 659 | data->pcie_lane_performance = data->pcie_lane_power_saving; |
| 660 | } |
| 661 | tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK); |
| 662 | phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table, |
| 663 | tmp, |
| 664 | MAX_REGULAR_DPM_NUMBER); |
| 665 | |
| 666 | if (pcie_table != NULL) { |
| 667 | /* max_entry is used to make sure we reserve one PCIE level |
| 668 | * for boot level (fix for A+A PSPP issue). |
| 669 | * If the PCIE table from the PPTable has a ULV entry + 8 entries, |
| 670 | * then ignore the last entry. */ |
| 671 | max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count; |
| 672 | for (i = 1; i < max_entry; i++) { |
| 673 | phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1, |
| 674 | get_pcie_gen_support(data->pcie_gen_cap, |
| 675 | pcie_table->entries[i].gen_speed), |
| 676 | get_pcie_lane_support(data->pcie_lane_cap, |
| 677 | pcie_table->entries[i].lane_width)); |
| 678 | } |
| 679 | data->dpm_table.pcie_speed_table.count = max_entry - 1; |
| 680 | smum_update_smc_table(hwmgr, SMU_BIF_TABLE); |
| 681 | } else { |
| 682 | /* Hardcode Pcie Table */ |
| 683 | phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, |
| 684 | get_pcie_gen_support(data->pcie_gen_cap, |
| 685 | PP_Min_PCIEGen), |
| 686 | get_pcie_lane_support(data->pcie_lane_cap, |
| 687 | PP_Max_PCIELane)); |
| 688 | phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, |
| 689 | get_pcie_gen_support(data->pcie_gen_cap, |
| 690 | PP_Min_PCIEGen), |
| 691 | get_pcie_lane_support(data->pcie_lane_cap, |
| 692 | PP_Max_PCIELane)); |
| 693 | phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, |
| 694 | get_pcie_gen_support(data->pcie_gen_cap, |
| 695 | PP_Max_PCIEGen), |
| 696 | get_pcie_lane_support(data->pcie_lane_cap, |
| 697 | PP_Max_PCIELane)); |
| 698 | phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, |
| 699 | get_pcie_gen_support(data->pcie_gen_cap, |
| 700 | PP_Max_PCIEGen), |
| 701 | get_pcie_lane_support(data->pcie_lane_cap, |
| 702 | PP_Max_PCIELane)); |
| 703 | phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, |
| 704 | get_pcie_gen_support(data->pcie_gen_cap, |
| 705 | PP_Max_PCIEGen), |
| 706 | get_pcie_lane_support(data->pcie_lane_cap, |
| 707 | PP_Max_PCIELane)); |
| 708 | phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, |
| 709 | get_pcie_gen_support(data->pcie_gen_cap, |
| 710 | PP_Max_PCIEGen), |
| 711 | get_pcie_lane_support(data->pcie_lane_cap, |
| 712 | PP_Max_PCIELane)); |
| 713 | |
| 714 | data->dpm_table.pcie_speed_table.count = 6; |
| 715 | } |
| 716 | /* Populate last level for boot PCIE level, but do not increment count. */ |
| 717 | if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { |
| 718 | for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) |
| 719 | phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i, |
| 720 | get_pcie_gen_support(data->pcie_gen_cap, |
| 721 | PP_Max_PCIEGen), |
| 722 | data->vbios_boot_state.pcie_lane_bootup_value); |
| 723 | } else { |
| 724 | phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, |
| 725 | data->dpm_table.pcie_speed_table.count, |
| 726 | get_pcie_gen_support(data->pcie_gen_cap, |
| 727 | PP_Min_PCIEGen), |
| 728 | get_pcie_lane_support(data->pcie_lane_cap, |
| 729 | PP_Max_PCIELane)); |
| 730 | |
| 731 | if (data->pcie_dpm_key_disabled) |
| 732 | phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, |
| 733 | data->dpm_table.pcie_speed_table.count, |
| 734 | smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr)); |
| 735 | } |
| 736 | return 0; |
| 737 | } |
| 738 | |
| 739 | static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr) |
| 740 | { |
| 741 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 742 | |
| 743 | memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table)); |
| 744 | |
| 745 | phm_reset_single_dpm_table( |
| 746 | &data->dpm_table.sclk_table, |
| 747 | smum_get_mac_definition(hwmgr, |
| 748 | SMU_MAX_LEVELS_GRAPHICS), |
| 749 | MAX_REGULAR_DPM_NUMBER); |
| 750 | phm_reset_single_dpm_table( |
| 751 | &data->dpm_table.mclk_table, |
| 752 | smum_get_mac_definition(hwmgr, |
| 753 | SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER); |
| 754 | |
| 755 | phm_reset_single_dpm_table( |
| 756 | &data->dpm_table.vddc_table, |
| 757 | smum_get_mac_definition(hwmgr, |
| 758 | SMU_MAX_LEVELS_VDDC), |
| 759 | MAX_REGULAR_DPM_NUMBER); |
| 760 | phm_reset_single_dpm_table( |
| 761 | &data->dpm_table.vddci_table, |
| 762 | smum_get_mac_definition(hwmgr, |
| 763 | SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER); |
| 764 | |
| 765 | phm_reset_single_dpm_table( |
| 766 | &data->dpm_table.mvdd_table, |
| 767 | smum_get_mac_definition(hwmgr, |
| 768 | SMU_MAX_LEVELS_MVDD), |
| 769 | MAX_REGULAR_DPM_NUMBER); |
| 770 | return 0; |
| 771 | } |
| 772 | /* |
| 773 | * This function initializes all DPM state tables |
| 774 | * for SMU7 based on the dependency tables. |
| 775 | * The dynamic state patching function will then trim these |
| 776 | * state tables to the allowed range based |
| 777 | * on the power policy or external client requests, |
| 778 | * such as UVD requests, etc. |
| 779 | */ |
| 780 | |
| 781 | static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) |
| 782 | { |
| 783 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 784 | struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table = |
| 785 | hwmgr->dyn_state.vddc_dependency_on_sclk; |
| 786 | struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table = |
| 787 | hwmgr->dyn_state.vddc_dependency_on_mclk; |
| 788 | struct phm_cac_leakage_table *std_voltage_table = |
| 789 | hwmgr->dyn_state.cac_leakage_table; |
| 790 | uint32_t i; |
| 791 | |
| 792 | PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, |
| 793 | "SCLK dependency table is missing. This table is mandatory" , return -EINVAL); |
| 794 | PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1, |
| 795 | "SCLK dependency table has to have is missing. This table is mandatory" , return -EINVAL); |
| 796 | |
| 797 | PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, |
| 798 | "MCLK dependency table is missing. This table is mandatory" , return -EINVAL); |
| 799 | PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1, |
| 800 | "VMCLK dependency table has to have is missing. This table is mandatory" , return -EINVAL); |
| 801 | |
| 802 | |
| 803 | /* Initialize Sclk DPM table based on allowed Sclk values */ |
| 804 | data->dpm_table.sclk_table.count = 0; |
| 805 | |
| 806 | for (i = 0; i < allowed_vdd_sclk_table->count; i++) { |
| 807 | if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value != |
| 808 | allowed_vdd_sclk_table->entries[i].clk) { |
| 809 | data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = |
| 810 | allowed_vdd_sclk_table->entries[i].clk; |
| 811 | data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0; |
| 812 | data->dpm_table.sclk_table.count++; |
| 813 | } |
| 814 | } |
| 815 | |
| 816 | PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, |
| 817 | "MCLK dependency table is missing. This table is mandatory" , return -EINVAL); |
| 818 | /* Initialize Mclk DPM table based on allow Mclk values */ |
| 819 | data->dpm_table.mclk_table.count = 0; |
| 820 | for (i = 0; i < allowed_vdd_mclk_table->count; i++) { |
| 821 | if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value != |
| 822 | allowed_vdd_mclk_table->entries[i].clk) { |
| 823 | data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = |
| 824 | allowed_vdd_mclk_table->entries[i].clk; |
| 825 | data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0; |
| 826 | data->dpm_table.mclk_table.count++; |
| 827 | } |
| 828 | } |
| 829 | |
| 830 | /* Initialize Vddc DPM table based on allowed Vddc values, and populate corresponding std values. */ |
| 831 | for (i = 0; i < allowed_vdd_sclk_table->count; i++) { |
| 832 | data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; |
| 833 | data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage; |
| 834 | /* param1 is for corresponding std voltage */ |
| 835 | data->dpm_table.vddc_table.dpm_levels[i].enabled = true; |
| 836 | } |
| 837 | |
| 838 | data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; |
| 839 | allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk; |
| 840 | |
| 841 | if (NULL != allowed_vdd_mclk_table) { |
| 842 | /* Initialize Vddci DPM table based on allowed Mclk values */ |
| 843 | for (i = 0; i < allowed_vdd_mclk_table->count; i++) { |
| 844 | data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; |
| 845 | data->dpm_table.vddci_table.dpm_levels[i].enabled = true; |
| 846 | } |
| 847 | data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count; |
| 848 | } |
| 849 | |
| 850 | allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk; |
| 851 | |
| 852 | if (NULL != allowed_vdd_mclk_table) { |
| 853 | /* |
| 854 | * Initialize MVDD DPM table based on allowed Mclk |
| 855 | * values |
| 856 | */ |
| 857 | for (i = 0; i < allowed_vdd_mclk_table->count; i++) { |
| 858 | data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; |
| 859 | data->dpm_table.mvdd_table.dpm_levels[i].enabled = true; |
| 860 | } |
| 861 | data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; |
| 862 | } |
| 863 | |
| 864 | return 0; |
| 865 | } |
| 866 | |
| 867 | static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr) |
| 868 | { |
| 869 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 870 | struct phm_ppt_v1_information *table_info = |
| 871 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 872 | uint32_t i; |
| 873 | |
| 874 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; |
| 875 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; |
| 876 | |
| 877 | if (table_info == NULL) |
| 878 | return -EINVAL; |
| 879 | |
| 880 | dep_sclk_table = table_info->vdd_dep_on_sclk; |
| 881 | dep_mclk_table = table_info->vdd_dep_on_mclk; |
| 882 | |
| 883 | PP_ASSERT_WITH_CODE(dep_sclk_table != NULL, |
| 884 | "SCLK dependency table is missing." , |
| 885 | return -EINVAL); |
| 886 | PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1, |
| 887 | "SCLK dependency table count is 0." , |
| 888 | return -EINVAL); |
| 889 | |
| 890 | PP_ASSERT_WITH_CODE(dep_mclk_table != NULL, |
| 891 | "MCLK dependency table is missing." , |
| 892 | return -EINVAL); |
| 893 | PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1, |
| 894 | "MCLK dependency table count is 0" , |
| 895 | return -EINVAL); |
| 896 | |
| 897 | /* Initialize Sclk DPM table based on allowed Sclk values */ |
| 898 | data->dpm_table.sclk_table.count = 0; |
| 899 | for (i = 0; i < dep_sclk_table->count; i++) { |
| 900 | if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value != |
| 901 | dep_sclk_table->entries[i].clk) { |
| 902 | |
| 903 | data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = |
| 904 | dep_sclk_table->entries[i].clk; |
| 905 | |
| 906 | data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = |
| 907 | i == 0; |
| 908 | data->dpm_table.sclk_table.count++; |
| 909 | } |
| 910 | } |
| 911 | if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0) |
| 912 | hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk; |
| 913 | /* Initialize Mclk DPM table based on allowed Mclk values */ |
| 914 | data->dpm_table.mclk_table.count = 0; |
| 915 | for (i = 0; i < dep_mclk_table->count; i++) { |
| 916 | if (i == 0 || data->dpm_table.mclk_table.dpm_levels |
| 917 | [data->dpm_table.mclk_table.count - 1].value != |
| 918 | dep_mclk_table->entries[i].clk) { |
| 919 | data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = |
| 920 | dep_mclk_table->entries[i].clk; |
| 921 | data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = |
| 922 | i == 0; |
| 923 | data->dpm_table.mclk_table.count++; |
| 924 | } |
| 925 | } |
| 926 | |
| 927 | if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) |
| 928 | hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk; |
| 929 | return 0; |
| 930 | } |
| 931 | |
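|  | /* |
|  | * Seed the OverDrive (ODN) sclk/mclk level tables and their voltage |
|  | * dependency tables from the golden DPM tables and the PPTable |
|  | * dependency tables. |
|  | */ |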
| 932 | static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr) |
| 933 | { |
| 934 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 935 | struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); |
| 936 | struct phm_ppt_v1_information *table_info = |
| 937 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 938 | uint32_t i; |
| 939 | |
| 940 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; |
| 941 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; |
| 942 | struct phm_odn_performance_level *entries; |
| 943 | |
| 944 | if (table_info == NULL) |
| 945 | return -EINVAL; |
| 946 | |
| 947 | dep_sclk_table = table_info->vdd_dep_on_sclk; |
| 948 | dep_mclk_table = table_info->vdd_dep_on_mclk; |
| 949 | |
| 950 | odn_table->odn_core_clock_dpm_levels.num_of_pl = |
| 951 | data->golden_dpm_table.sclk_table.count; |
| 952 | entries = odn_table->odn_core_clock_dpm_levels.entries; |
| 953 | for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) { |
| 954 | entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value; |
| 955 | entries[i].enabled = true; |
| 956 | entries[i].vddc = dep_sclk_table->entries[i].vddc; |
| 957 | } |
| 958 | |
| 959 | smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table, |
| 960 | (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk)); |
| 961 | |
| 962 | odn_table->odn_memory_clock_dpm_levels.num_of_pl = |
| 963 | data->golden_dpm_table.mclk_table.count; |
| 964 | entries = odn_table->odn_memory_clock_dpm_levels.entries; |
| 965 | for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) { |
| 966 | entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value; |
| 967 | entries[i].enabled = true; |
| 968 | entries[i].vddc = dep_mclk_table->entries[i].vddc; |
| 969 | } |
| 970 | |
| 971 | smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table, |
| 972 | (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk)); |
| 973 | |
| 974 | return 0; |
| 975 | } |
| 976 | |
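|  | /* |
|  | * Derive odn_dpm_table.min_vddc/max_vddc from the VBIOS voltage range, |
|  | * falling back to the first/last sclk dependency entries when the VBIOS |
|  | * values are zero, above 2000, or outside the dependency table range. |
|  | */ |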
| 977 | static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr) |
| 978 | { |
| 979 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 980 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; |
| 981 | struct phm_ppt_v1_information *table_info = |
| 982 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 983 | uint32_t min_vddc = 0; |
| 984 | uint32_t max_vddc = 0; |
| 985 | |
| 986 | if (!table_info) |
| 987 | return; |
| 988 | |
| 989 | dep_sclk_table = table_info->vdd_dep_on_sclk; |
| 990 | |
| 991 | atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc); |
| 992 | |
| 993 | if (min_vddc == 0 || min_vddc > 2000 |
| 994 | || min_vddc > dep_sclk_table->entries[0].vddc) |
| 995 | min_vddc = dep_sclk_table->entries[0].vddc; |
| 996 | |
| 997 | if (max_vddc == 0 || max_vddc > 2000 |
| 998 | || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc) |
| 999 | max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc; |
| 1000 | |
| 1001 | data->odn_dpm_table.min_vddc = min_vddc; |
| 1002 | data->odn_dpm_table.max_vddc = max_vddc; |
| 1003 | } |
| 1004 | |
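|  | /* |
|  | * Compare the current DPM tables against the user-adjusted ODN tables and |
|  | * set the DPMTABLE_OD_UPDATE_* flags for whichever of sclk/mclk/vddc changed. |
|  | */ |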
| 1005 | static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) |
| 1006 | { |
| 1007 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1008 | struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); |
| 1009 | struct phm_ppt_v1_information *table_info = |
| 1010 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 1011 | uint32_t i; |
| 1012 | |
| 1013 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; |
| 1014 | struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table; |
| 1015 | |
| 1016 | if (table_info == NULL) |
| 1017 | return; |
| 1018 | |
| 1019 | for (i = 0; i < data->dpm_table.sclk_table.count; i++) { |
| 1020 | if (odn_table->odn_core_clock_dpm_levels.entries[i].clock != |
| 1021 | data->dpm_table.sclk_table.dpm_levels[i].value) { |
| 1022 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; |
| 1023 | break; |
| 1024 | } |
| 1025 | } |
| 1026 | |
| 1027 | for (i = 0; i < data->dpm_table.mclk_table.count; i++) { |
| 1028 | if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock != |
| 1029 | data->dpm_table.mclk_table.dpm_levels[i].value) { |
| 1030 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; |
| 1031 | break; |
| 1032 | } |
| 1033 | } |
| 1034 | |
| 1035 | dep_table = table_info->vdd_dep_on_mclk; |
| 1036 | odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk); |
| 1037 | |
| 1038 | for (i = 0; i < dep_table->count; i++) { |
| 1039 | if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { |
| 1040 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; |
| 1041 | return; |
| 1042 | } |
| 1043 | } |
| 1044 | |
| 1045 | dep_table = table_info->vdd_dep_on_sclk; |
| 1046 | odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); |
| 1047 | for (i = 0; i < dep_table->count; i++) { |
| 1048 | if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { |
| 1049 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; |
| 1050 | return; |
| 1051 | } |
| 1052 | } |
| 1053 | if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { |
| 1054 | data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; |
| 1055 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; |
| 1056 | } |
| 1057 | } |
| 1058 | |
| 1059 | static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) |
| 1060 | { |
| 1061 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1062 | |
| 1063 | smu7_reset_dpm_tables(hwmgr); |
| 1064 | |
| 1065 | if (hwmgr->pp_table_version == PP_TABLE_V1) |
| 1066 | smu7_setup_dpm_tables_v1(hwmgr); |
| 1067 | else if (hwmgr->pp_table_version == PP_TABLE_V0) |
| 1068 | smu7_setup_dpm_tables_v0(hwmgr); |
| 1069 | |
| 1070 | smu7_setup_default_pcie_table(hwmgr); |
| 1071 | |
| 1072 | /* save a copy of the default DPM table */ |
| 1073 | memcpy(&(data->golden_dpm_table), &(data->dpm_table), |
| 1074 | sizeof(struct smu7_dpm_table)); |
| 1075 | |
| 1076 | /* initialize ODN table */ |
| 1077 | if (hwmgr->od_enabled) { |
| 1078 | if (data->odn_dpm_table.max_vddc) { |
| 1079 | smu7_check_dpm_table_updated(hwmgr); |
| 1080 | } else { |
| 1081 | smu7_setup_voltage_range_from_vbios(hwmgr); |
| 1082 | smu7_odn_initial_default_setting(hwmgr); |
| 1083 | } |
| 1084 | } |
| 1085 | return 0; |
| 1086 | } |
| 1087 | |
| 1088 | static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr) |
| 1089 | { |
| 1090 | |
| 1091 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1092 | PHM_PlatformCaps_RegulatorHot)) |
| 1093 | return smum_send_msg_to_smc(hwmgr, |
| 1094 | PPSMC_MSG_EnableVRHotGPIOInterrupt, |
| 1095 | NULL); |
| 1096 | |
| 1097 | return 0; |
| 1098 | } |
| 1099 | |
| 1100 | static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr) |
| 1101 | { |
| 1102 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, |
| 1103 | SCLK_PWRMGT_OFF, 0); |
| 1104 | return 0; |
| 1105 | } |
| 1106 | |
| 1107 | static int smu7_enable_ulv(struct pp_hwmgr *hwmgr) |
| 1108 | { |
| 1109 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1110 | |
| 1111 | if (data->ulv_supported) |
| 1112 | return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL); |
| 1113 | |
| 1114 | return 0; |
| 1115 | } |
| 1116 | |
| 1117 | static int smu7_disable_ulv(struct pp_hwmgr *hwmgr) |
| 1118 | { |
| 1119 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1120 | |
| 1121 | if (data->ulv_supported) |
| 1122 | return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL); |
| 1123 | |
| 1124 | return 0; |
| 1125 | } |
| 1126 | |
| 1127 | static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) |
| 1128 | { |
| 1129 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1130 | PHM_PlatformCaps_SclkDeepSleep)) { |
| 1131 | if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL)) |
| 1132 | PP_ASSERT_WITH_CODE(false, |
| 1133 | "Attempt to enable Master Deep Sleep switch failed!" , |
| 1134 | return -EINVAL); |
| 1135 | } else { |
| 1136 | if (smum_send_msg_to_smc(hwmgr, |
| 1137 | PPSMC_MSG_MASTER_DeepSleep_OFF, |
| 1138 | NULL)) { |
| 1139 | PP_ASSERT_WITH_CODE(false, |
| 1140 | "Attempt to disable Master Deep Sleep switch failed!" , |
| 1141 | return -EINVAL); |
| 1142 | } |
| 1143 | } |
| 1144 | |
| 1145 | return 0; |
| 1146 | } |
| 1147 | |
| 1148 | static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) |
| 1149 | { |
| 1150 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1151 | PHM_PlatformCaps_SclkDeepSleep)) { |
| 1152 | if (smum_send_msg_to_smc(hwmgr, |
| 1153 | PPSMC_MSG_MASTER_DeepSleep_OFF, |
| 1154 | NULL)) { |
| 1155 | PP_ASSERT_WITH_CODE(false, |
| 1156 | "Attempt to disable Master Deep Sleep switch failed!" , |
| 1157 | return -EINVAL); |
| 1158 | } |
| 1159 | } |
| 1160 | |
| 1161 | return 0; |
| 1162 | } |
| 1163 | |
| 1164 | static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr) |
| 1165 | { |
| 1166 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1167 | uint32_t soft_register_value = 0; |
| 1168 | uint32_t handshake_disables_offset = data->soft_regs_start |
| 1169 | + smum_get_offsetof(hwmgr, |
| 1170 | SMU_SoftRegisters, HandshakeDisables); |
| 1171 | |
| 1172 | soft_register_value = cgs_read_ind_register(hwmgr->device, |
| 1173 | CGS_IND_REG__SMC, handshake_disables_offset); |
| 1174 | soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE; |
| 1175 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 1176 | handshake_disables_offset, soft_register_value); |
| 1177 | return 0; |
| 1178 | } |
| 1179 | |
| 1180 | static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr) |
| 1181 | { |
| 1182 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1183 | uint32_t soft_register_value = 0; |
| 1184 | uint32_t handshake_disables_offset = data->soft_regs_start |
| 1185 | + smum_get_offsetof(hwmgr, |
| 1186 | SMU_SoftRegisters, HandshakeDisables); |
| 1187 | |
| 1188 | soft_register_value = cgs_read_ind_register(hwmgr->device, |
| 1189 | CGS_IND_REG__SMC, handshake_disables_offset); |
| 1190 | soft_register_value |= smum_get_mac_definition(hwmgr, |
| 1191 | SMU_UVD_MCLK_HANDSHAKE_DISABLE); |
| 1192 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 1193 | handshake_disables_offset, soft_register_value); |
| 1194 | return 0; |
| 1195 | } |
| 1196 | |
| 1197 | static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) |
| 1198 | { |
| 1199 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1200 | |
| 1201 | /* enable SCLK dpm */ |
| 1202 | if (!data->sclk_dpm_key_disabled) { |
| 1203 | if (hwmgr->chip_id >= CHIP_POLARIS10 && |
| 1204 | hwmgr->chip_id <= CHIP_VEGAM) |
| 1205 | smu7_disable_sclk_vce_handshake(hwmgr); |
| 1206 | |
| 1207 | PP_ASSERT_WITH_CODE( |
| 1208 | (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)), |
| 1209 | "Failed to enable SCLK DPM during DPM Start Function!" , |
| 1210 | return -EINVAL); |
| 1211 | } |
| 1212 | |
| 1213 | /* enable MCLK dpm */ |
| 1214 | if (0 == data->mclk_dpm_key_disabled) { |
| 1215 | if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK)) |
| 1216 | smu7_disable_handshake_uvd(hwmgr); |
| 1217 | |
| 1218 | PP_ASSERT_WITH_CODE( |
| 1219 | (0 == smum_send_msg_to_smc(hwmgr, |
| 1220 | PPSMC_MSG_MCLKDPM_Enable, |
| 1221 | NULL)), |
| 1222 | "Failed to enable MCLK DPM during DPM Start Function!" , |
| 1223 | return -EINVAL); |
| 1224 | |
| 1225 | if ((hwmgr->chip_family == AMDGPU_FAMILY_CI) || |
| 1226 | (hwmgr->chip_id == CHIP_POLARIS10) || |
| 1227 | (hwmgr->chip_id == CHIP_POLARIS11) || |
| 1228 | (hwmgr->chip_id == CHIP_POLARIS12) || |
| 1229 | (hwmgr->chip_id == CHIP_TONGA) || |
| 1230 | (hwmgr->chip_id == CHIP_TOPAZ)) |
| 1231 | PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); |
| 1232 | |
| 1233 | |
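|  | /* |
|  | * Program what appear to be the LCAC MC0/MC1/CPL control registers with |
|  | * the vendor-specified values; CI parts use raw SMC indirect offsets, |
|  | * later parts the named ixLCAC_* registers (VEGAM takes different values). |
|  | */ |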
| 1234 | if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { |
| 1235 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5); |
| 1236 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5); |
| 1237 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005); |
| 1238 | udelay(10); |
| 1239 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005); |
| 1240 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005); |
| 1241 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005); |
| 1242 | } else { |
| 1243 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); |
| 1244 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5); |
| 1245 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005); |
| 1246 | udelay(10); |
| 1247 | if (hwmgr->chip_id == CHIP_VEGAM) { |
| 1248 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009); |
| 1249 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009); |
| 1250 | } else { |
| 1251 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); |
| 1252 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); |
| 1253 | } |
| 1254 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005); |
| 1255 | } |
| 1256 | } |
| 1257 | |
| 1258 | return 0; |
| 1259 | } |
| 1260 | |
| 1261 | static int smu7_start_dpm(struct pp_hwmgr *hwmgr) |
| 1262 | { |
| 1263 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1264 | |
| 1265 | /*enable general power management */ |
| 1266 | |
| 1267 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, |
| 1268 | GLOBAL_PWRMGT_EN, 1); |
| 1269 | |
| 1270 | /* enable sclk deep sleep */ |
| 1271 | |
| 1272 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, |
| 1273 | DYNAMIC_PM_EN, 1); |
| 1274 | |
| 1275 | /* prepare for PCIE DPM */ |
| 1276 | |
| 1277 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 1278 | data->soft_regs_start + |
| 1279 | smum_get_offsetof(hwmgr, SMU_SoftRegisters, |
| 1280 | VoltageChangeTimeout), 0x1000); |
| 1281 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, |
| 1282 | SWRST_COMMAND_1, RESETLC, 0x0); |
| 1283 | |
| 1284 | if (hwmgr->chip_family == AMDGPU_FAMILY_CI) |
| 1285 | cgs_write_register(hwmgr->device, 0x1488, |
| 1286 | (cgs_read_register(hwmgr->device, 0x1488) & ~0x1)); |
| 1287 | |
| 1288 | if (smu7_enable_sclk_mclk_dpm(hwmgr)) { |
| 1289 | pr_err("Failed to enable Sclk DPM and Mclk DPM!" ); |
| 1290 | return -EINVAL; |
| 1291 | } |
| 1292 | |
| 1293 | /* enable PCIE dpm */ |
| 1294 | if (0 == data->pcie_dpm_key_disabled) { |
| 1295 | PP_ASSERT_WITH_CODE( |
| 1296 | (0 == smum_send_msg_to_smc(hwmgr, |
| 1297 | PPSMC_MSG_PCIeDPM_Enable, |
| 1298 | NULL)), |
| 1299 | "Failed to enable pcie DPM during DPM Start Function!" , |
| 1300 | return -EINVAL); |
| 1301 | } else { |
| 1302 | PP_ASSERT_WITH_CODE( |
| 1303 | (0 == smum_send_msg_to_smc(hwmgr, |
| 1304 | PPSMC_MSG_PCIeDPM_Disable, |
| 1305 | NULL)), |
| 1306 | "Failed to disable pcie DPM during DPM Start Function!" , |
| 1307 | return -EINVAL); |
| 1308 | } |
| 1309 | |
| 1310 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1311 | PHM_PlatformCaps_Falcon_QuickTransition)) { |
| 1312 | PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr, |
| 1313 | PPSMC_MSG_EnableACDCGPIOInterrupt, |
| 1314 | NULL)), |
| 1315 | "Failed to enable AC DC GPIO Interrupt!" , |
| 1316 | ); |
| 1317 | } |
| 1318 | |
| 1319 | return 0; |
| 1320 | } |
| 1321 | |
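| | /* Ask the SMU to stop SCLK and MCLK DPM unless they are key-disabled. */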
| 1322 | static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) |
| 1323 | { |
| 1324 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1325 | |
| 1326 | /* disable SCLK dpm */ |
| 1327 | if (!data->sclk_dpm_key_disabled) { |
| 1328 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), |
| 1329 | "Trying to disable SCLK DPM when DPM is disabled" , |
| 1330 | return 0); |
| 1331 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL); |
| 1332 | } |
| 1333 | |
| 1334 | /* disable MCLK dpm */ |
| 1335 | if (!data->mclk_dpm_key_disabled) { |
| 1336 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), |
| 1337 | "Trying to disable MCLK DPM when DPM is disabled" , |
| 1338 | return 0); |
| 1339 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL); |
| 1340 | } |
| 1341 | |
| 1342 | return 0; |
| 1343 | } |
| 1344 | |
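| | /* Reverse of smu7_start_dpm(): disable power management, PCIe/SCLK/MCLK DPM and voltage control. */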
| 1345 | static int smu7_stop_dpm(struct pp_hwmgr *hwmgr) |
| 1346 | { |
| 1347 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1348 | |
| 1349 | /* disable general power management */ |
| 1350 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, |
| 1351 | GLOBAL_PWRMGT_EN, 0); |
| 1352 | /* disable sclk deep sleep */ |
| 1353 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, |
| 1354 | DYNAMIC_PM_EN, 0); |
| 1355 | |
| 1356 | /* disable PCIE dpm */ |
| 1357 | if (!data->pcie_dpm_key_disabled) { |
| 1358 | PP_ASSERT_WITH_CODE( |
| 1359 | (smum_send_msg_to_smc(hwmgr, |
| 1360 | PPSMC_MSG_PCIeDPM_Disable, |
| 1361 | NULL) == 0), |
| 1362 | "Failed to disable pcie DPM during DPM Stop Function!" , |
| 1363 | return -EINVAL); |
| 1364 | } |
| 1365 | |
| 1366 | smu7_disable_sclk_mclk_dpm(hwmgr); |
| 1367 | |
| 1368 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), |
| 1369 | "Trying to disable voltage DPM when DPM is disabled" , |
| 1370 | return 0); |
| 1371 | |
| 1372 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL); |
| 1373 | |
| 1374 | return 0; |
| 1375 | } |
| 1376 | |
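| | /* Route the selected throttling source(s) to thermal protection, or disable protection when none are set. */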
| 1377 | static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) |
| 1378 | { |
| 1379 | bool protection; |
| 1380 | enum DPM_EVENT_SRC src; |
| 1381 | |
| 1382 | switch (sources) { |
| 1383 | default: |
| 1384 | pr_err("Unknown throttling event sources." ); |
| 1385 | fallthrough; |
| 1386 | case 0: |
| 1387 | protection = false; |
| 1388 | /* src is unused */ |
| 1389 | break; |
| 1390 | case (1 << PHM_AutoThrottleSource_Thermal): |
| 1391 | protection = true; |
| 1392 | src = DPM_EVENT_SRC_DIGITAL; |
| 1393 | break; |
| 1394 | case (1 << PHM_AutoThrottleSource_External): |
| 1395 | protection = true; |
| 1396 | src = DPM_EVENT_SRC_EXTERNAL; |
| 1397 | break; |
| 1398 | case (1 << PHM_AutoThrottleSource_External) | |
| 1399 | (1 << PHM_AutoThrottleSource_Thermal): |
| 1400 | protection = true; |
| 1401 | src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; |
| 1402 | break; |
| 1403 | } |
| 1404 | /* Order matters - don't enable thermal protection for the wrong source. */ |
| 1405 | if (protection) { |
| 1406 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, |
| 1407 | DPM_EVENT_SRC, src); |
| 1408 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, |
| 1409 | THERMAL_PROTECTION_DIS, |
| 1410 | !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1411 | PHM_PlatformCaps_ThermalController)); |
| 1412 | } else |
| 1413 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, |
| 1414 | THERMAL_PROTECTION_DIS, 1); |
| 1415 | } |
| 1416 | |
| 1417 | static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, |
| 1418 | PHM_AutoThrottleSource source) |
| 1419 | { |
| 1420 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1421 | |
| 1422 | if (!(data->active_auto_throttle_sources & (1 << source))) { |
| 1423 | data->active_auto_throttle_sources |= 1 << source; |
| 1424 | smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
| 1425 | } |
| 1426 | return 0; |
| 1427 | } |
| 1428 | |
| 1429 | static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) |
| 1430 | { |
| 1431 | return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
| 1432 | } |
| 1433 | |
| 1434 | static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, |
| 1435 | PHM_AutoThrottleSource source) |
| 1436 | { |
| 1437 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1438 | |
| 1439 | if (data->active_auto_throttle_sources & (1 << source)) { |
| 1440 | data->active_auto_throttle_sources &= ~(1 << source); |
| 1441 | smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
| 1442 | } |
| 1443 | return 0; |
| 1444 | } |
| 1445 | |
| 1446 | static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) |
| 1447 | { |
| 1448 | return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
| 1449 | } |
| 1450 | |
| 1451 | static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr) |
| 1452 | { |
| 1453 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1454 | data->pcie_performance_request = true; |
| 1455 | |
| 1456 | return 0; |
| 1457 | } |
| 1458 | |
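| | /* Program the EDC leakage values into the DIDT registers named by the 0xFFFFFFFF-terminated config list. */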
| 1459 | static int smu7_program_edc_didt_registers(struct pp_hwmgr *hwmgr, |
| 1460 | uint32_t *cac_config_regs, |
| 1461 | AtomCtrl_EDCLeakgeTable *edc_leakage_table) |
| 1462 | { |
| 1463 | uint32_t data, i = 0; |
| 1464 | |
| 1465 | while (cac_config_regs[i] != 0xFFFFFFFF) { |
| 1466 | data = edc_leakage_table->DIDT_REG[i]; |
| 1467 | cgs_write_ind_register(hwmgr->device, |
| 1468 | CGS_IND_REG__DIDT, |
| 1469 | cac_config_regs[i], |
| 1470 | data); |
| 1471 | i++; |
| 1472 | } |
| 1473 | |
| 1474 | return 0; |
| 1475 | } |
| 1476 | |
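| | /* Program the EDC leakage registers from the VBIOS-provided table and enable the EDC controller, otherwise disable it. */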
| 1477 | static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr) |
| 1478 | { |
| 1479 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1480 | int ret = 0; |
| 1481 | |
| 1482 | if (!data->disable_edc_leakage_controller && |
| 1483 | data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset && |
| 1484 | data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) { |
| 1485 | ret = smu7_program_edc_didt_registers(hwmgr, |
| 1486 | DIDTEDCConfig_P12,
| 1487 | &data->edc_leakage_table);
| 1488 | if (ret) |
| 1489 | return ret; |
| 1490 | |
| 1491 | ret = smum_send_msg_to_smc(hwmgr, |
| 1492 | (PPSMC_Msg)PPSMC_MSG_EnableEDCController,
| 1493 | NULL); |
| 1494 | } else { |
| 1495 | ret = smum_send_msg_to_smc(hwmgr, |
| 1496 | (PPSMC_Msg)PPSMC_MSG_DisableEDCController,
| 1497 | NULL); |
| 1498 | } |
| 1499 | |
| 1500 | return ret; |
| 1501 | } |
| 1502 | |
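| | /* Derive the UMD pstate (stable and peak) SCLK/MCLK levels from the golden DPM tables, reported in MHz. */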
| 1503 | static void smu7_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) |
| 1504 | { |
| 1505 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1506 | struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; |
| 1507 | int32_t tmp_sclk, count, percentage; |
| 1508 | |
| 1509 | if (golden_dpm_table->mclk_table.count == 1) { |
| 1510 | percentage = 70; |
| 1511 | hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[0].value; |
| 1512 | } else { |
| 1513 | percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value / |
| 1514 | golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; |
| 1515 | hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value; |
| 1516 | } |
| 1517 | |
| 1518 | tmp_sclk = hwmgr->pstate_mclk * percentage / 100; |
| 1519 | |
| 1520 | if (hwmgr->pp_table_version == PP_TABLE_V0) { |
| 1521 | struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk = |
| 1522 | hwmgr->dyn_state.vddc_dependency_on_sclk; |
| 1523 | |
| 1524 | for (count = vddc_dependency_on_sclk->count - 1; count >= 0; count--) { |
| 1525 | if (tmp_sclk >= vddc_dependency_on_sclk->entries[count].clk) { |
| 1526 | hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[count].clk; |
| 1527 | break; |
| 1528 | } |
| 1529 | } |
| 1530 | if (count < 0) |
| 1531 | hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[0].clk; |
| 1532 | |
| 1533 | hwmgr->pstate_sclk_peak = |
| 1534 | vddc_dependency_on_sclk->entries[vddc_dependency_on_sclk->count - 1].clk; |
| 1535 | } else if (hwmgr->pp_table_version == PP_TABLE_V1) { |
| 1536 | struct phm_ppt_v1_information *table_info = |
| 1537 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 1538 | struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk = |
| 1539 | table_info->vdd_dep_on_sclk; |
| 1540 | |
| 1541 | for (count = vdd_dep_on_sclk->count - 1; count >= 0; count--) { |
| 1542 | if (tmp_sclk >= vdd_dep_on_sclk->entries[count].clk) { |
| 1543 | hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[count].clk; |
| 1544 | break; |
| 1545 | } |
| 1546 | } |
| 1547 | if (count < 0) |
| 1548 | hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[0].clk; |
| 1549 | |
| 1550 | hwmgr->pstate_sclk_peak = |
| 1551 | vdd_dep_on_sclk->entries[vdd_dep_on_sclk->count - 1].clk; |
| 1552 | } |
| 1553 | |
| 1554 | hwmgr->pstate_mclk_peak = |
| 1555 | golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; |
| 1556 | |
| 1557 | /* make sure the output is in MHz */
| 1558 | hwmgr->pstate_sclk /= 100; |
| 1559 | hwmgr->pstate_mclk /= 100; |
| 1560 | hwmgr->pstate_sclk_peak /= 100; |
| 1561 | hwmgr->pstate_mclk_peak /= 100; |
| 1562 | } |
| 1563 | |
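| | /* Full DPM bring-up: voltage control, default DPM/SMC tables, ULV, deep sleep, DIDT, CAC and power containment, then start DPM. */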
| 1564 | static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) |
| 1565 | { |
| 1566 | int tmp_result = 0; |
| 1567 | int result = 0; |
| 1568 | |
| 1569 | if (smu7_voltage_control(hwmgr)) { |
| 1570 | tmp_result = smu7_enable_voltage_control(hwmgr); |
| 1571 | PP_ASSERT_WITH_CODE(tmp_result == 0, |
| 1572 | "Failed to enable voltage control!" , |
| 1573 | result = tmp_result); |
| 1574 | |
| 1575 | tmp_result = smu7_construct_voltage_tables(hwmgr); |
| 1576 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1577 | "Failed to construct voltage tables!" , |
| 1578 | result = tmp_result); |
| 1579 | } |
| 1580 | smum_initialize_mc_reg_table(hwmgr); |
| 1581 | |
| 1582 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
| 1583 | PHM_PlatformCaps_EngineSpreadSpectrumSupport))
| 1584 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 1585 | GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1); |
| 1586 | |
| 1587 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
| 1588 | PHM_PlatformCaps_ThermalController))
| 1589 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 1590 | GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0); |
| 1591 | |
| 1592 | tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr); |
| 1593 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1594 | "Failed to program static screen threshold parameters!" , |
| 1595 | result = tmp_result); |
| 1596 | |
| 1597 | tmp_result = smu7_enable_display_gap(hwmgr); |
| 1598 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1599 | "Failed to enable display gap!" , result = tmp_result); |
| 1600 | |
| 1601 | tmp_result = smu7_program_voting_clients(hwmgr); |
| 1602 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1603 | "Failed to program voting clients!" , result = tmp_result); |
| 1604 | |
| 1605 | tmp_result = smum_process_firmware_header(hwmgr); |
| 1606 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1607 | "Failed to process firmware header!" , result = tmp_result); |
| 1608 | |
| 1609 | if (hwmgr->chip_id != CHIP_VEGAM) { |
| 1610 | tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr); |
| 1611 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1612 | "Failed to initialize switch from ArbF0 to F1!" , |
| 1613 | result = tmp_result); |
| 1614 | } |
| 1615 | |
| 1616 | result = smu7_setup_default_dpm_tables(hwmgr); |
| 1617 | PP_ASSERT_WITH_CODE(0 == result, |
| 1618 | "Failed to setup default DPM tables!" , return result); |
| 1619 | |
| 1620 | tmp_result = smum_init_smc_table(hwmgr); |
| 1621 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1622 | "Failed to initialize SMC table!" , result = tmp_result); |
| 1623 | |
| 1624 | tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr); |
| 1625 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1626 | "Failed to enable VR hot GPIO interrupt!" , result = tmp_result); |
| 1627 | |
| 1628 | if (hwmgr->chip_id >= CHIP_POLARIS10 && |
| 1629 | hwmgr->chip_id <= CHIP_VEGAM) { |
| 1630 | tmp_result = smu7_notify_has_display(hwmgr); |
| 1631 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1632 | "Failed to enable display setting!" , result = tmp_result); |
| 1633 | } else { |
| 1634 | smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
| 1635 | } |
| 1636 | |
| 1637 | if (hwmgr->chip_id >= CHIP_POLARIS10 && |
| 1638 | hwmgr->chip_id <= CHIP_VEGAM) { |
| 1639 | tmp_result = smu7_populate_edc_leakage_registers(hwmgr); |
| 1640 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1641 | "Failed to populate edc leakage registers!" , result = tmp_result); |
| 1642 | } |
| 1643 | |
| 1644 | tmp_result = smu7_enable_sclk_control(hwmgr); |
| 1645 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1646 | "Failed to enable SCLK control!" , result = tmp_result); |
| 1647 | |
| 1648 | tmp_result = smu7_enable_smc_voltage_controller(hwmgr); |
| 1649 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1650 | "Failed to enable voltage control!" , result = tmp_result); |
| 1651 | |
| 1652 | tmp_result = smu7_enable_ulv(hwmgr); |
| 1653 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1654 | "Failed to enable ULV!" , result = tmp_result); |
| 1655 | |
| 1656 | tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr); |
| 1657 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1658 | "Failed to enable deep sleep master switch!" , result = tmp_result); |
| 1659 | |
| 1660 | tmp_result = smu7_enable_didt_config(hwmgr); |
| 1661 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1662 | "Failed to enable deep sleep master switch!" , result = tmp_result); |
| 1663 | |
| 1664 | tmp_result = smu7_start_dpm(hwmgr); |
| 1665 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1666 | "Failed to start DPM!" , result = tmp_result); |
| 1667 | |
| 1668 | tmp_result = smu7_enable_smc_cac(hwmgr); |
| 1669 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1670 | "Failed to enable SMC CAC!" , result = tmp_result); |
| 1671 | |
| 1672 | tmp_result = smu7_enable_power_containment(hwmgr); |
| 1673 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1674 | "Failed to enable power containment!" , result = tmp_result); |
| 1675 | |
| 1676 | tmp_result = smu7_power_control_set_level(hwmgr); |
| 1677 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1678 | "Failed to power control set level!" , result = tmp_result); |
| 1679 | |
| 1680 | tmp_result = smu7_enable_thermal_auto_throttle(hwmgr); |
| 1681 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1682 | "Failed to enable thermal auto throttle!" , result = tmp_result); |
| 1683 | |
| 1684 | tmp_result = smu7_pcie_performance_request(hwmgr); |
| 1685 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1686 | "pcie performance request failed!" , result = tmp_result); |
| 1687 | |
| 1688 | smu7_populate_umdpstate_clocks(hwmgr); |
| 1689 | |
| 1690 | return 0; |
| 1691 | } |
| 1692 | |
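| | /* Enable or disable AVFS on the SMC, skipping the message when the feature is already in the requested state. */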
| 1693 | static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) |
| 1694 | { |
| 1695 | if (!hwmgr->avfs_supported) |
| 1696 | return 0; |
| 1697 | |
| 1698 | if (enable) { |
| 1699 | if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, |
| 1700 | CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { |
| 1701 | PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( |
| 1702 | hwmgr, PPSMC_MSG_EnableAvfs, NULL), |
| 1703 | "Failed to enable AVFS!" , |
| 1704 | return -EINVAL); |
| 1705 | } |
| 1706 | } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, |
| 1707 | CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { |
| 1708 | PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( |
| 1709 | hwmgr, PPSMC_MSG_DisableAvfs, NULL), |
| 1710 | "Failed to disable AVFS!" , |
| 1711 | return -EINVAL); |
| 1712 | } |
| 1713 | |
| 1714 | return 0; |
| 1715 | } |
| 1716 | |
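| | /* Re-evaluate AVFS after overdrive changes: keep it off for VDDC updates, restart it for SCLK updates. */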
| 1717 | static int smu7_update_avfs(struct pp_hwmgr *hwmgr) |
| 1718 | { |
| 1719 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1720 | |
| 1721 | if (!hwmgr->avfs_supported) |
| 1722 | return 0; |
| 1723 | |
| 1724 | if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { |
| 1725 | smu7_avfs_control(hwmgr, false);
| 1726 | } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { |
| 1727 | smu7_avfs_control(hwmgr, false);
| 1728 | smu7_avfs_control(hwmgr, true);
| 1729 | } else { |
| 1730 | smu7_avfs_control(hwmgr, true);
| 1731 | } |
| 1732 | |
| 1733 | return 0; |
| 1734 | } |
| 1735 | |
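| | /* Full DPM tear-down: unwind power containment, CAC, DIDT, throttling and AVFS, stop DPM and the SMC, then restore ARB_F0. */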
| 1736 | static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) |
| 1737 | { |
| 1738 | int tmp_result, result = 0; |
| 1739 | |
| 1740 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
| 1741 | PHM_PlatformCaps_ThermalController))
| 1742 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 1743 | GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); |
| 1744 | |
| 1745 | tmp_result = smu7_disable_power_containment(hwmgr); |
| 1746 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1747 | "Failed to disable power containment!" , result = tmp_result); |
| 1748 | |
| 1749 | tmp_result = smu7_disable_smc_cac(hwmgr); |
| 1750 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1751 | "Failed to disable SMC CAC!" , result = tmp_result); |
| 1752 | |
| 1753 | tmp_result = smu7_disable_didt_config(hwmgr); |
| 1754 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1755 | "Failed to disable DIDT!" , result = tmp_result); |
| 1756 | |
| 1757 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 1758 | CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); |
| 1759 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 1760 | GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); |
| 1761 | |
| 1762 | tmp_result = smu7_disable_thermal_auto_throttle(hwmgr); |
| 1763 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1764 | "Failed to disable thermal auto throttle!" , result = tmp_result); |
| 1765 | |
| 1766 | tmp_result = smu7_avfs_control(hwmgr, false);
| 1767 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1768 | "Failed to disable AVFS!" , result = tmp_result); |
| 1769 | |
| 1770 | tmp_result = smu7_stop_dpm(hwmgr); |
| 1771 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1772 | "Failed to stop DPM!" , result = tmp_result); |
| 1773 | |
| 1774 | tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr); |
| 1775 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1776 | "Failed to disable deep sleep master switch!" , result = tmp_result); |
| 1777 | |
| 1778 | tmp_result = smu7_disable_ulv(hwmgr); |
| 1779 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1780 | "Failed to disable ULV!" , result = tmp_result); |
| 1781 | |
| 1782 | tmp_result = smu7_clear_voting_clients(hwmgr); |
| 1783 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1784 | "Failed to clear voting clients!" , result = tmp_result); |
| 1785 | |
| 1786 | tmp_result = smu7_reset_to_default(hwmgr); |
| 1787 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1788 | "Failed to reset to default!" , result = tmp_result); |
| 1789 | |
| 1790 | tmp_result = smum_stop_smc(hwmgr); |
| 1791 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1792 | "Failed to stop smc!" , result = tmp_result); |
| 1793 | |
| 1794 | tmp_result = smu7_force_switch_to_arbf0(hwmgr); |
| 1795 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1796 | "Failed to force to switch arbf0!" , result = tmp_result); |
| 1797 | |
| 1798 | return result; |
| 1799 | } |
| 1800 | |
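| | /* Seed the hwmgr backend with default DPM settings and detect how VDDC/VDDCI/VDDGFX/MVDD are controlled. */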
| 1801 | static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) |
| 1802 | { |
| 1803 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1804 | struct phm_ppt_v1_information *table_info = |
| 1805 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 1806 | struct amdgpu_device *adev = hwmgr->adev; |
| 1807 | uint8_t tmp1, tmp2; |
| 1808 | uint16_t tmp3 = 0; |
| 1809 | |
| 1810 | data->dll_default_on = false; |
| 1811 | data->mclk_dpm0_activity_target = 0xa; |
| 1812 | data->vddc_vddgfx_delta = 300; |
| 1813 | data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT; |
| 1814 | data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT; |
| 1815 | data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0; |
| 1816 | data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1; |
| 1817 | data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2; |
| 1818 | data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3; |
| 1819 | data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4; |
| 1820 | data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5; |
| 1821 | data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6; |
| 1822 | data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7; |
| 1823 | |
| 1824 | data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; |
| 1825 | data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true; |
| 1826 | data->pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); |
| 1827 | /* need to set voltage control types before EVV patching */ |
| 1828 | data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE; |
| 1829 | data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE; |
| 1830 | data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE; |
| 1831 | data->enable_tdc_limit_feature = true; |
| 1832 | data->enable_pkg_pwr_tracking_feature = true; |
| 1833 | data->force_pcie_gen = PP_PCIEGenInvalid; |
| 1834 | data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false; |
| 1835 | data->current_profile_setting.bupdate_sclk = 1; |
| 1836 | data->current_profile_setting.sclk_up_hyst = 0; |
| 1837 | data->current_profile_setting.sclk_down_hyst = 100; |
| 1838 | data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT; |
| 1839 | data->current_profile_setting.bupdate_mclk = 1; |
| 1840 | if (hwmgr->chip_id >= CHIP_POLARIS10) { |
| 1841 | if (adev->gmc.vram_width == 256) { |
| 1842 | data->current_profile_setting.mclk_up_hyst = 10; |
| 1843 | data->current_profile_setting.mclk_down_hyst = 60; |
| 1844 | data->current_profile_setting.mclk_activity = 25; |
| 1845 | } else if (adev->gmc.vram_width == 128) { |
| 1846 | data->current_profile_setting.mclk_up_hyst = 5; |
| 1847 | data->current_profile_setting.mclk_down_hyst = 16; |
| 1848 | data->current_profile_setting.mclk_activity = 20; |
| 1849 | } else if (adev->gmc.vram_width == 64) { |
| 1850 | data->current_profile_setting.mclk_up_hyst = 3; |
| 1851 | data->current_profile_setting.mclk_down_hyst = 16; |
| 1852 | data->current_profile_setting.mclk_activity = 20; |
| 1853 | } |
| 1854 | } else { |
| 1855 | data->current_profile_setting.mclk_up_hyst = 0; |
| 1856 | data->current_profile_setting.mclk_down_hyst = 100; |
| 1857 | data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT; |
| 1858 | } |
| 1859 | hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; |
| 1860 | hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; |
| 1861 | hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; |
| 1862 | |
| 1863 | if (hwmgr->chip_id == CHIP_HAWAII) { |
| 1864 | data->thermal_temp_setting.temperature_low = 94500; |
| 1865 | data->thermal_temp_setting.temperature_high = 95000; |
| 1866 | data->thermal_temp_setting.temperature_shutdown = 104000; |
| 1867 | } else { |
| 1868 | data->thermal_temp_setting.temperature_low = 99500; |
| 1869 | data->thermal_temp_setting.temperature_high = 100000; |
| 1870 | data->thermal_temp_setting.temperature_shutdown = 104000; |
| 1871 | } |
| 1872 | |
| 1873 | data->fast_watermark_threshold = 100; |
| 1874 | if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
| 1875 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) |
| 1876 | data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; |
| 1877 | else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
| 1878 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT)) |
| 1879 | data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; |
| 1880 | |
| 1881 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
| 1882 | PHM_PlatformCaps_ControlVDDGFX)) {
| 1883 | if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
| 1884 | VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { |
| 1885 | data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; |
| 1886 | } |
| 1887 | } |
| 1888 | |
| 1889 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
| 1890 | PHM_PlatformCaps_EnableMVDDControl)) {
| 1891 | if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
| 1892 | VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) |
| 1893 | data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; |
| 1894 | else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
| 1895 | VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) |
| 1896 | data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; |
| 1897 | } |
| 1898 | |
| 1899 | if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) |
| 1900 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
| 1901 | PHM_PlatformCaps_ControlVDDGFX);
| 1902 | |
| 1903 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
| 1904 | PHM_PlatformCaps_ControlVDDCI)) {
| 1905 | if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
| 1906 | VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) |
| 1907 | data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; |
| 1908 | else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
| 1909 | VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) |
| 1910 | data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; |
| 1911 | } |
| 1912 | |
| 1913 | if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE) |
| 1914 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
| 1915 | PHM_PlatformCaps_EnableMVDDControl);
| 1916 | |
| 1917 | if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) |
| 1918 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
| 1919 | PHM_PlatformCaps_ControlVDDCI);
| 1920 | |
| 1921 | data->vddc_phase_shed_control = 1; |
| 1922 | if ((hwmgr->chip_id == CHIP_POLARIS12) || |
| 1923 | ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) || |
| 1924 | ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) || |
| 1925 | ASICID_IS_P30(adev->pdev->device, adev->pdev->revision) || |
| 1926 | ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) { |
| 1927 | if (data->voltage_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { |
| 1928 | atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
| 1929 | &tmp3);
| 1930 | tmp3 = (tmp3 >> 5) & 0x3; |
| 1931 | data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3; |
| 1932 | } |
| 1933 | } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { |
| 1934 | data->vddc_phase_shed_control = 1; |
| 1935 | } |
| 1936 | |
| 1937 | if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK) |
| 1938 | && (table_info->cac_dtp_table->usClockStretchAmount != 0)) |
| 1939 | phm_cap_set(hwmgr->platform_descriptor.platformCaps,
| 1940 | PHM_PlatformCaps_ClockStretcher);
| 1941 | |
| 1942 | data->pcie_gen_performance.max = PP_PCIEGen1; |
| 1943 | data->pcie_gen_performance.min = PP_PCIEGen3; |
| 1944 | data->pcie_gen_power_saving.max = PP_PCIEGen1; |
| 1945 | data->pcie_gen_power_saving.min = PP_PCIEGen3; |
| 1946 | data->pcie_lane_performance.max = 0; |
| 1947 | data->pcie_lane_performance.min = 16; |
| 1948 | data->pcie_lane_power_saving.max = 0; |
| 1949 | data->pcie_lane_power_saving.min = 16; |
| 1950 | |
| 1951 | |
| 1952 | if (adev->pg_flags & AMD_PG_SUPPORT_UVD) |
| 1953 | phm_cap_set(hwmgr->platform_descriptor.platformCaps,
| 1954 | PHM_PlatformCaps_UVDPowerGating);
| 1955 | if (adev->pg_flags & AMD_PG_SUPPORT_VCE) |
| 1956 | phm_cap_set(hwmgr->platform_descriptor.platformCaps,
| 1957 | PHM_PlatformCaps_VCEPowerGating);
| 1958 | |
| 1959 | data->disable_edc_leakage_controller = true; |
| 1960 | if (((adev->asic_type == CHIP_POLARIS10) && hwmgr->is_kicker) || |
| 1961 | ((adev->asic_type == CHIP_POLARIS11) && hwmgr->is_kicker) || |
| 1962 | (adev->asic_type == CHIP_POLARIS12) || |
| 1963 | (adev->asic_type == CHIP_VEGAM)) |
| 1964 | data->disable_edc_leakage_controller = false; |
| 1965 | |
| 1966 | if (!atomctrl_is_asic_internal_ss_supported(hwmgr)) { |
| 1967 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
| 1968 | PHM_PlatformCaps_MemorySpreadSpectrumSupport);
| 1969 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
| 1970 | PHM_PlatformCaps_EngineSpreadSpectrumSupport);
| 1971 | } |
| 1972 | |
| 1973 | if ((adev->pdev->device == 0x699F) && |
| 1974 | (adev->pdev->revision == 0xCF)) { |
| 1975 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
| 1976 | PHM_PlatformCaps_PowerContainment);
| 1977 | data->enable_tdc_limit_feature = false; |
| 1978 | data->enable_pkg_pwr_tracking_feature = false; |
| 1979 | data->disable_edc_leakage_controller = true; |
| 1980 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
| 1981 | PHM_PlatformCaps_ClockStretcher);
| 1982 | } |
| 1983 | } |
| 1984 | |
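| | /* Derive the expected ring-oscillator (RO) efuse range for this ASIC revision and cache it in the backend. */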
| 1985 | static int smu7_calculate_ro_range(struct pp_hwmgr *hwmgr) |
| 1986 | { |
| 1987 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1988 | struct amdgpu_device *adev = hwmgr->adev; |
| 1989 | uint32_t asicrev1, evv_revision, max = 0, min = 0; |
| 1990 | |
| 1991 | atomctrl_read_efuse(hwmgr, STRAP_EVV_REVISION_LSB, STRAP_EVV_REVISION_MSB, |
| 1992 | &evv_revision);
| 1993 | |
| 1994 | atomctrl_read_efuse(hwmgr, 568, 579, &asicrev1);
| 1995 | |
| 1996 | if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) || |
| 1997 | ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) { |
| 1998 | min = 1200; |
| 1999 | max = 2500; |
| 2000 | } else if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) || |
| 2001 | ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) { |
| 2002 | min = 900; |
| 2003 | max = 2100; |
| 2004 | } else if (hwmgr->chip_id == CHIP_POLARIS10) { |
| 2005 | if (adev->pdev->subsystem_vendor == 0x106B) { |
| 2006 | min = 1000; |
| 2007 | max = 2300; |
| 2008 | } else { |
| 2009 | if (evv_revision == 0) { |
| 2010 | min = 1000; |
| 2011 | max = 2300; |
| 2012 | } else if (evv_revision == 1) { |
| 2013 | if (asicrev1 == 326) { |
| 2014 | min = 1200; |
| 2015 | max = 2500; |
| 2016 | /* TODO: PATCH RO in VBIOS */ |
| 2017 | } else { |
| 2018 | min = 1200; |
| 2019 | max = 2000; |
| 2020 | } |
| 2021 | } else if (evv_revision == 2) { |
| 2022 | min = 1200; |
| 2023 | max = 2500; |
| 2024 | } |
| 2025 | } |
| 2026 | } else { |
| 2027 | min = 1100; |
| 2028 | max = 2100; |
| 2029 | } |
| 2030 | |
| 2031 | data->ro_range_minimum = min; |
| 2032 | data->ro_range_maximum = max; |
| 2033 | |
| 2034 | /* TODO: PATCH RO in VBIOS here */ |
| 2035 | |
| 2036 | return 0; |
| 2037 | } |
| 2038 | |
| 2039 | /** |
| 2040 | * smu7_get_evv_voltages - Get Leakage VDDC based on leakage ID. |
| 2041 | * |
| 2042 | * @hwmgr: the address of the powerplay hardware manager. |
| 2043 | * Return: 0 on success, -EINVAL on failure to read a valid EVV voltage.
| 2044 | */ |
| 2045 | static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) |
| 2046 | { |
| 2047 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2048 | uint16_t vv_id; |
| 2049 | uint16_t vddc = 0; |
| 2050 | uint16_t vddgfx = 0; |
| 2051 | uint16_t i, j; |
| 2052 | uint32_t sclk = 0; |
| 2053 | struct phm_ppt_v1_information *table_info = |
| 2054 | (struct phm_ppt_v1_information *)hwmgr->pptable; |
| 2055 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; |
| 2056 | |
| 2057 | if (hwmgr->chip_id == CHIP_POLARIS10 || |
| 2058 | hwmgr->chip_id == CHIP_POLARIS11 || |
| 2059 | hwmgr->chip_id == CHIP_POLARIS12) |
| 2060 | smu7_calculate_ro_range(hwmgr); |
| 2061 | |
| 2062 | for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { |
| 2063 | vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; |
| 2064 | |
| 2065 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { |
| 2066 | if ((hwmgr->pp_table_version == PP_TABLE_V1) |
| 2067 | && !phm_get_sclk_for_voltage_evv(hwmgr, |
| 2068 | table_info->vddgfx_lookup_table, vv_id, &sclk)) {
| 2069 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
| 2070 | PHM_PlatformCaps_ClockStretcher)) {
| 2071 | sclk_table = table_info->vdd_dep_on_sclk; |
| 2072 | |
| 2073 | for (j = 1; j < sclk_table->count; j++) { |
| 2074 | if (sclk_table->entries[j].clk == sclk && |
| 2075 | sclk_table->entries[j].cks_enable == 0) { |
| 2076 | sclk += 5000; |
| 2077 | break; |
| 2078 | } |
| 2079 | } |
| 2080 | } |
| 2081 | if (0 == atomctrl_get_voltage_evv_on_sclk |
| 2082 | (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk, |
| 2083 | vv_id, &vddgfx)) {
| 2084 | /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */ |
| 2085 | PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!" , return -EINVAL); |
| 2086 | |
| 2087 | /* the voltage should not be zero nor equal to leakage ID */ |
| 2088 | if (vddgfx != 0 && vddgfx != vv_id) { |
| 2089 | data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx; |
| 2090 | data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id; |
| 2091 | data->vddcgfx_leakage.count++; |
| 2092 | } |
| 2093 | } else { |
| 2094 | pr_info("Error retrieving EVV voltage value!\n" ); |
| 2095 | } |
| 2096 | } |
| 2097 | } else { |
| 2098 | if ((hwmgr->pp_table_version == PP_TABLE_V0) |
| 2099 | || !phm_get_sclk_for_voltage_evv(hwmgr, |
| 2100 | table_info->vddc_lookup_table, vv_id, &sclk)) {
| 2101 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
| 2102 | PHM_PlatformCaps_ClockStretcher)) {
| 2103 | if (table_info == NULL) |
| 2104 | return -EINVAL; |
| 2105 | sclk_table = table_info->vdd_dep_on_sclk; |
| 2106 | |
| 2107 | for (j = 1; j < sclk_table->count; j++) { |
| 2108 | if (sclk_table->entries[j].clk == sclk && |
| 2109 | sclk_table->entries[j].cks_enable == 0) { |
| 2110 | sclk += 5000; |
| 2111 | break; |
| 2112 | } |
| 2113 | } |
| 2114 | } |
| 2115 | |
| 2116 | if (phm_get_voltage_evv_on_sclk(hwmgr, |
| 2117 | VOLTAGE_TYPE_VDDC, |
| 2118 | sclk, vv_id, &vddc) == 0) {
| 2119 | if (vddc >= 2000 || vddc == 0) |
| 2120 | return -EINVAL; |
| 2121 | } else { |
| 2122 | pr_debug("failed to retrieving EVV voltage!\n" ); |
| 2123 | continue; |
| 2124 | } |
| 2125 | |
| 2126 | /* the voltage should not be zero nor equal to leakage ID */ |
| 2127 | if (vddc != 0 && vddc != vv_id) { |
| 2128 | data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc); |
| 2129 | data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id; |
| 2130 | data->vddc_leakage.count++; |
| 2131 | } |
| 2132 | } |
| 2133 | } |
| 2134 | } |
| 2135 | |
| 2136 | return 0; |
| 2137 | } |
| 2138 | |
| 2139 | /** |
| 2140 | * smu7_patch_ppt_v1_with_vdd_leakage - Change virtual leakage voltage to actual value. |
| 2141 | * |
| 2142 | * @hwmgr: the address of the powerplay hardware manager. |
| 2143 | * @voltage: pointer to changing voltage |
| 2144 | * @leakage_table: pointer to leakage table |
| 2145 | */ |
| 2146 | static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr, |
| 2147 | uint16_t *voltage, struct smu7_leakage_voltage *leakage_table) |
| 2148 | { |
| 2149 | uint32_t index; |
| 2150 | |
| 2151 | /* search for leakage voltage ID 0xff01 ~ 0xff08 */ |
| 2152 | for (index = 0; index < leakage_table->count; index++) { |
| 2153 | /* if this voltage matches a leakage voltage ID */ |
| 2154 | /* patch with actual leakage voltage */ |
| 2155 | if (leakage_table->leakage_id[index] == *voltage) { |
| 2156 | *voltage = leakage_table->actual_voltage[index]; |
| 2157 | break; |
| 2158 | } |
| 2159 | } |
| 2160 | |
| 2161 | if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) |
| 2162 | pr_info("Voltage value looks like a Leakage ID but it's not patched\n" ); |
| 2163 | } |
| 2164 | |
| 2165 | /** |
| 2166 | * smu7_patch_lookup_table_with_leakage - Patch voltage lookup table by EVV leakages. |
| 2167 | * |
| 2168 | * @hwmgr: the address of the powerplay hardware manager. |
| 2169 | * @lookup_table: pointer to voltage lookup table |
| 2170 | * @leakage_table: pointer to leakage table |
| 2171 | * Return: always 0 |
| 2172 | */ |
| 2173 | static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, |
| 2174 | phm_ppt_v1_voltage_lookup_table *lookup_table, |
| 2175 | struct smu7_leakage_voltage *leakage_table) |
| 2176 | { |
| 2177 | uint32_t i; |
| 2178 | |
| 2179 | for (i = 0; i < lookup_table->count; i++) |
| 2180 | smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, |
| 2181 | voltage: &lookup_table->entries[i].us_vdd, leakage_table); |
| 2182 | |
| 2183 | return 0; |
| 2184 | } |
| 2185 | |
| 2186 | static int smu7_patch_clock_voltage_limits_with_vddc_leakage( |
| 2187 | struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table, |
| 2188 | uint16_t *vddc) |
| 2189 | { |
| 2190 | struct phm_ppt_v1_information *table_info = |
| 2191 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2192 | smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
| 2193 | hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = |
| 2194 | table_info->max_clock_voltage_on_dc.vddc; |
| 2195 | return 0; |
| 2196 | } |
| 2197 | |
| 2198 | static int smu7_patch_voltage_dependency_tables_with_lookup_table( |
| 2199 | struct pp_hwmgr *hwmgr) |
| 2200 | { |
| 2201 | uint8_t entry_id; |
| 2202 | uint8_t voltage_id; |
| 2203 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2204 | struct phm_ppt_v1_information *table_info = |
| 2205 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2206 | |
| 2207 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = |
| 2208 | table_info->vdd_dep_on_sclk; |
| 2209 | struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = |
| 2210 | table_info->vdd_dep_on_mclk; |
| 2211 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = |
| 2212 | table_info->mm_dep_table; |
| 2213 | |
| 2214 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { |
| 2215 | for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { |
| 2216 | voltage_id = sclk_table->entries[entry_id].vddInd; |
| 2217 | sclk_table->entries[entry_id].vddgfx = |
| 2218 | table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd; |
| 2219 | } |
| 2220 | } else { |
| 2221 | for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { |
| 2222 | voltage_id = sclk_table->entries[entry_id].vddInd; |
| 2223 | sclk_table->entries[entry_id].vddc = |
| 2224 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; |
| 2225 | } |
| 2226 | } |
| 2227 | |
| 2228 | for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { |
| 2229 | voltage_id = mclk_table->entries[entry_id].vddInd; |
| 2230 | mclk_table->entries[entry_id].vddc = |
| 2231 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; |
| 2232 | } |
| 2233 | |
| 2234 | for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { |
| 2235 | voltage_id = mm_table->entries[entry_id].vddcInd; |
| 2236 | mm_table->entries[entry_id].vddc = |
| 2237 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; |
| 2238 | } |
| 2239 | |
| 2240 | return 0; |
| 2241 | |
| 2242 | } |
| 2243 | |
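| | /* Add a calculated voltage record to a lookup table, reusing an existing entry with the same VDD where possible. */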
| 2244 | static int phm_add_voltage(struct pp_hwmgr *hwmgr, |
| 2245 | phm_ppt_v1_voltage_lookup_table *look_up_table, |
| 2246 | phm_ppt_v1_voltage_lookup_record *record) |
| 2247 | { |
| 2248 | uint32_t i; |
| 2249 | |
| 2250 | PP_ASSERT_WITH_CODE((NULL != look_up_table), |
| 2251 | "Lookup Table empty." , return -EINVAL); |
| 2252 | PP_ASSERT_WITH_CODE((0 != look_up_table->count), |
| 2253 | "Lookup Table empty." , return -EINVAL); |
| 2254 | |
| 2255 | i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
| 2256 | PP_ASSERT_WITH_CODE((i >= look_up_table->count),
| 2257 | "Lookup Table is full.", return -EINVAL);
| 2258 | |
| 2259 | /* This is to avoid entering duplicate calculated records. */ |
| 2260 | for (i = 0; i < look_up_table->count; i++) { |
| 2261 | if (look_up_table->entries[i].us_vdd == record->us_vdd) { |
| 2262 | if (look_up_table->entries[i].us_calculated == 1) |
| 2263 | return 0; |
| 2264 | break; |
| 2265 | } |
| 2266 | } |
| 2267 | |
| 2268 | look_up_table->entries[i].us_calculated = 1; |
| 2269 | look_up_table->entries[i].us_vdd = record->us_vdd; |
| 2270 | look_up_table->entries[i].us_cac_low = record->us_cac_low; |
| 2271 | look_up_table->entries[i].us_cac_mid = record->us_cac_mid; |
| 2272 | look_up_table->entries[i].us_cac_high = record->us_cac_high; |
| 2273 | /* Only increment the count when we're appending, not replacing duplicate entry. */ |
| 2274 | if (i == look_up_table->count) |
| 2275 | look_up_table->count++; |
| 2276 | |
| 2277 | return 0; |
| 2278 | } |
| 2279 | |
| 2280 | |
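| | /* On split-rail (VDDGFX via SVID2) parts, derive VDDC/VDDGFX from the per-level offsets and register them in the lookup tables. */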
| 2281 | static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) |
| 2282 | { |
| 2283 | uint8_t entry_id; |
| 2284 | struct phm_ppt_v1_voltage_lookup_record v_record; |
| 2285 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2286 | struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2287 | |
| 2288 | phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; |
| 2289 | phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; |
| 2290 | |
| 2291 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { |
| 2292 | for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { |
| 2293 | if (sclk_table->entries[entry_id].vdd_offset & (1 << 15)) |
| 2294 | v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + |
| 2295 | sclk_table->entries[entry_id].vdd_offset - 0xFFFF; |
| 2296 | else |
| 2297 | v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + |
| 2298 | sclk_table->entries[entry_id].vdd_offset; |
| 2299 | |
| 2300 | sclk_table->entries[entry_id].vddc = |
| 2301 | v_record.us_cac_low = v_record.us_cac_mid = |
| 2302 | v_record.us_cac_high = v_record.us_vdd; |
| 2303 | |
| 2304 | phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
| 2305 | } |
| 2306 | |
| 2307 | for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { |
| 2308 | if (mclk_table->entries[entry_id].vdd_offset & (1 << 15)) |
| 2309 | v_record.us_vdd = mclk_table->entries[entry_id].vddc + |
| 2310 | mclk_table->entries[entry_id].vdd_offset - 0xFFFF; |
| 2311 | else |
| 2312 | v_record.us_vdd = mclk_table->entries[entry_id].vddc + |
| 2313 | mclk_table->entries[entry_id].vdd_offset; |
| 2314 | |
| 2315 | mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low = |
| 2316 | v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; |
| 2317 | phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
| 2318 | } |
| 2319 | } |
| 2320 | return 0; |
| 2321 | } |
| 2322 | |
| 2323 | static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) |
| 2324 | { |
| 2325 | uint8_t entry_id; |
| 2326 | struct phm_ppt_v1_voltage_lookup_record v_record; |
| 2327 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2328 | struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2329 | phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; |
| 2330 | |
| 2331 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { |
| 2332 | for (entry_id = 0; entry_id < mm_table->count; entry_id++) { |
| 2333 | if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15)) |
| 2334 | v_record.us_vdd = mm_table->entries[entry_id].vddc + |
| 2335 | mm_table->entries[entry_id].vddgfx_offset - 0xFFFF; |
| 2336 | else |
| 2337 | v_record.us_vdd = mm_table->entries[entry_id].vddc + |
| 2338 | mm_table->entries[entry_id].vddgfx_offset; |
| 2339 | |
| 2340 | /* Add the calculated VDDGFX to the VDDGFX lookup table */ |
| 2341 | mm_table->entries[entry_id].vddgfx = v_record.us_cac_low = |
| 2342 | v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; |
| 2343 | phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
| 2344 | } |
| 2345 | } |
| 2346 | return 0; |
| 2347 | } |
| 2348 | |
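| | /* Sort a voltage lookup table in ascending order of VDD. */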
| 2349 | static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, |
| 2350 | struct phm_ppt_v1_voltage_lookup_table *lookup_table) |
| 2351 | { |
| 2352 | uint32_t table_size, i, j; |
| 2353 | table_size = lookup_table->count; |
| 2354 | |
| 2355 | PP_ASSERT_WITH_CODE(0 != lookup_table->count, |
| 2356 | "Lookup table is empty" , return -EINVAL); |
| 2357 | |
| 2358 | /* Sorting voltages */ |
| 2359 | for (i = 0; i < table_size - 1; i++) { |
| 2360 | for (j = i + 1; j > 0; j--) { |
| 2361 | if (lookup_table->entries[j].us_vdd < |
| 2362 | lookup_table->entries[j - 1].us_vdd) { |
| 2363 | swap(lookup_table->entries[j - 1], |
| 2364 | lookup_table->entries[j]); |
| 2365 | } |
| 2366 | } |
| 2367 | } |
| 2368 | |
| 2369 | return 0; |
| 2370 | } |
| 2371 | |
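| | /* Patch the dependency and lookup tables with real leakage voltages, recompute derived voltages and sort the lookup tables. */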
| 2372 | static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr) |
| 2373 | { |
| 2374 | int result = 0; |
| 2375 | int tmp_result; |
| 2376 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2377 | struct phm_ppt_v1_information *table_info = |
| 2378 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2379 | |
| 2380 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { |
| 2381 | tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, |
| 2382 | lookup_table: table_info->vddgfx_lookup_table, leakage_table: &(data->vddcgfx_leakage)); |
| 2383 | if (tmp_result != 0) |
| 2384 | result = tmp_result; |
| 2385 | |
| 2386 | smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, |
| 2387 | voltage: &table_info->max_clock_voltage_on_dc.vddgfx, leakage_table: &(data->vddcgfx_leakage)); |
| 2388 | } else { |
| 2389 | |
| 2390 | tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, |
| 2391 | lookup_table: table_info->vddc_lookup_table, leakage_table: &(data->vddc_leakage)); |
| 2392 | if (tmp_result) |
| 2393 | result = tmp_result; |
| 2394 | |
| 2395 | tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, |
| 2396 | leakage_table: &(data->vddc_leakage), vddc: &table_info->max_clock_voltage_on_dc.vddc); |
| 2397 | if (tmp_result) |
| 2398 | result = tmp_result; |
| 2399 | } |
| 2400 | |
| 2401 | tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr); |
| 2402 | if (tmp_result) |
| 2403 | result = tmp_result; |
| 2404 | |
| 2405 | tmp_result = smu7_calc_voltage_dependency_tables(hwmgr); |
| 2406 | if (tmp_result) |
| 2407 | result = tmp_result; |
| 2408 | |
| 2409 | tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr); |
| 2410 | if (tmp_result) |
| 2411 | result = tmp_result; |
| 2412 | |
| 2413 | tmp_result = smu7_sort_lookup_table(hwmgr, lookup_table: table_info->vddgfx_lookup_table); |
| 2414 | if (tmp_result) |
| 2415 | result = tmp_result; |
| 2416 | |
| 2417 | tmp_result = smu7_sort_lookup_table(hwmgr, lookup_table: table_info->vddc_lookup_table); |
| 2418 | if (tmp_result) |
| 2419 | result = tmp_result; |
| 2420 | |
| 2421 | return result; |
| 2422 | } |
| 2423 | |
| 2424 | static int smu7_find_highest_vddc(struct pp_hwmgr *hwmgr) |
| 2425 | { |
| 2426 | struct phm_ppt_v1_information *table_info = |
| 2427 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2428 | struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = |
| 2429 | table_info->vdd_dep_on_sclk; |
| 2430 | struct phm_ppt_v1_voltage_lookup_table *lookup_table = |
| 2431 | table_info->vddc_lookup_table; |
| 2432 | uint16_t highest_voltage; |
| 2433 | uint32_t i; |
| 2434 | |
| 2435 | highest_voltage = allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; |
| 2436 | |
| 2437 | for (i = 0; i < lookup_table->count; i++) { |
| 2438 | if (lookup_table->entries[i].us_vdd < ATOM_VIRTUAL_VOLTAGE_ID0 && |
| 2439 | lookup_table->entries[i].us_vdd > highest_voltage) |
| 2440 | highest_voltage = lookup_table->entries[i].us_vdd; |
| 2441 | } |
| 2442 | |
| 2443 | return highest_voltage; |
| 2444 | } |
| 2445 | |
| 2446 | static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr) |
| 2447 | { |
| 2448 | struct phm_ppt_v1_information *table_info = |
| 2449 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2450 | |
| 2451 | struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = |
| 2452 | table_info->vdd_dep_on_sclk; |
| 2453 | struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = |
| 2454 | table_info->vdd_dep_on_mclk; |
| 2455 | |
| 2456 | PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, |
| 2457 | "VDD dependency on SCLK table is missing." , |
| 2458 | return -EINVAL); |
| 2459 | PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, |
| 2460 | "VDD dependency on SCLK table has to have is missing." , |
| 2461 | return -EINVAL); |
| 2462 | |
| 2463 | PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, |
| 2464 | "VDD dependency on MCLK table is missing" , |
| 2465 | return -EINVAL); |
| 2466 | PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, |
| 2467 | "VDD dependency on MCLK table has to have is missing." , |
| 2468 | return -EINVAL); |
| 2469 | |
| 2470 | table_info->max_clock_voltage_on_ac.sclk = |
| 2471 | allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; |
| 2472 | table_info->max_clock_voltage_on_ac.mclk = |
| 2473 | allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; |
| 2474 | if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM) |
| 2475 | table_info->max_clock_voltage_on_ac.vddc = |
| 2476 | smu7_find_highest_vddc(hwmgr); |
| 2477 | else |
| 2478 | table_info->max_clock_voltage_on_ac.vddc = |
| 2479 | allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; |
| 2480 | table_info->max_clock_voltage_on_ac.vddci = |
| 2481 | allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; |
| 2482 | |
| 2483 | hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk; |
| 2484 | hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk; |
| 2485 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc; |
| 2486 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci; |
| 2487 | |
| 2488 | return 0; |
| 2489 | } |
| 2490 | |
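| | /* Workaround for specific Polaris10 boards: force the CKS stretch amount and make the top MCLK level use a VDDC of at least 1.0 V. */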
| 2491 | static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr) |
| 2492 | { |
| 2493 | struct phm_ppt_v1_information *table_info = |
| 2494 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2495 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; |
| 2496 | struct phm_ppt_v1_voltage_lookup_table *lookup_table; |
| 2497 | uint32_t i; |
| 2498 | uint32_t hw_revision, sub_vendor_id, sub_sys_id; |
| 2499 | struct amdgpu_device *adev = hwmgr->adev; |
| 2500 | |
| 2501 | if (table_info != NULL) { |
| 2502 | dep_mclk_table = table_info->vdd_dep_on_mclk; |
| 2503 | lookup_table = table_info->vddc_lookup_table; |
| 2504 | } else |
| 2505 | return 0; |
| 2506 | |
| 2507 | hw_revision = adev->pdev->revision; |
| 2508 | sub_sys_id = adev->pdev->subsystem_device; |
| 2509 | sub_vendor_id = adev->pdev->subsystem_vendor; |
| 2510 | |
| 2511 | if (adev->pdev->device == 0x67DF && hw_revision == 0xC7 && |
| 2512 | ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) || |
| 2513 | (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) || |
| 2514 | (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) { |
| 2515 | |
| 2516 | PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, |
| 2517 | CGS_IND_REG__SMC, |
| 2518 | PWR_CKS_CNTL, |
| 2519 | CKS_STRETCH_AMOUNT, |
| 2520 | 0x3); |
| 2521 | |
| 2522 | if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000) |
| 2523 | return 0; |
| 2524 | |
| 2525 | for (i = 0; i < lookup_table->count; i++) { |
| 2526 | if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) { |
| 2527 | dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i; |
| 2528 | return 0; |
| 2529 | } |
| 2530 | } |
| 2531 | } |
| 2532 | return 0; |
| 2533 | } |
| 2534 | |
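| | /* Set up the VDDC PCC GPIO slow-down mode and seed fan and operating-temperature limits from the powerplay table. */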
| 2535 | static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr) |
| 2536 | { |
| 2537 | struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; |
| 2538 | uint32_t temp_reg; |
| 2539 | struct phm_ppt_v1_information *table_info = |
| 2540 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2541 | |
| 2542 | |
| 2543 | if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
| 2544 | temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); |
| 2545 | switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { |
| 2546 | case 0: |
| 2547 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); |
| 2548 | break; |
| 2549 | case 1: |
| 2550 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); |
| 2551 | break; |
| 2552 | case 2: |
| 2553 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); |
| 2554 | break; |
| 2555 | case 3: |
| 2556 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); |
| 2557 | break; |
| 2558 | case 4: |
| 2559 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); |
| 2560 | break; |
| 2561 | default: |
| 2562 | break; |
| 2563 | } |
| 2564 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); |
| 2565 | } |
| 2566 | |
| 2567 | if (table_info == NULL) |
| 2568 | return 0; |
| 2569 | |
| 2570 | if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 && |
| 2571 | hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) { |
| 2572 | hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit = |
| 2573 | (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; |
| 2574 | |
| 2575 | hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit = |
| 2576 | (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; |
| 2577 | |
| 2578 | hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1; |
| 2579 | |
| 2580 | hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100; |
| 2581 | |
| 2582 | hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit = |
| 2583 | (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; |
| 2584 | |
| 2585 | hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1; |
| 2586 | |
| 2587 | table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ? |
| 2588 | (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0; |
| 2589 | |
| 2590 | table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp; |
| 2591 | table_info->cac_dtp_table->usOperatingTempStep = 1; |
| 2592 | table_info->cac_dtp_table->usOperatingTempHyst = 1; |
| 2593 | |
| 2594 | hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = |
| 2595 | hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; |
| 2596 | |
| 2597 | hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = |
| 2598 | hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; |
| 2599 | |
| 2600 | hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = |
| 2601 | table_info->cac_dtp_table->usOperatingTempMinLimit; |
| 2602 | |
| 2603 | hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = |
| 2604 | table_info->cac_dtp_table->usOperatingTempMaxLimit; |
| 2605 | |
| 2606 | hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = |
| 2607 | table_info->cac_dtp_table->usDefaultTargetOperatingTemp; |
| 2608 | |
| 2609 | hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = |
| 2610 | table_info->cac_dtp_table->usOperatingTempStep; |
| 2611 | |
| 2612 | hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = |
| 2613 | table_info->cac_dtp_table->usTargetOperatingTemp; |
| 2614 | if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK) |
| 2615 | phm_cap_set(hwmgr->platform_descriptor.platformCaps,
| 2616 | PHM_PlatformCaps_ODFuzzyFanControlSupport);
| 2617 | } |
| 2618 | |
| 2619 | return 0; |
| 2620 | } |
| 2621 | |
| 2622 | /** |
| 2623 | * smu7_patch_ppt_v0_with_vdd_leakage - Change virtual leakage voltage to actual value. |
| 2624 | * |
| 2625 | * @hwmgr: the address of the powerplay hardware manager. |
| 2626 | * @voltage: pointer to changing voltage |
| 2627 | * @leakage_table: pointer to leakage table |
| 2628 | */ |
| 2629 | static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr, |
| 2630 | uint32_t *voltage, struct smu7_leakage_voltage *leakage_table) |
| 2631 | { |
| 2632 | uint32_t index; |
| 2633 | |
| 2634 | /* search for leakage voltage ID 0xff01 ~ 0xff08 */ |
| 2635 | for (index = 0; index < leakage_table->count; index++) { |
| 2636 | /* if this voltage matches a leakage voltage ID */ |
| 2637 | /* patch with actual leakage voltage */ |
| 2638 | if (leakage_table->leakage_id[index] == *voltage) { |
| 2639 | *voltage = leakage_table->actual_voltage[index]; |
| 2640 | break; |
| 2641 | } |
| 2642 | } |
| 2643 | |
| 2644 | if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) |
| 2645 | pr_info("Voltage value looks like a Leakage ID but it's not patched\n" ); |
| 2646 | } |
| 2647 | |
| 2648 | |
| 2649 | static int smu7_patch_vddc(struct pp_hwmgr *hwmgr, |
| 2650 | struct phm_clock_voltage_dependency_table *tab) |
| 2651 | { |
| 2652 | uint16_t i; |
| 2653 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2654 | |
| 2655 | if (tab) |
| 2656 | for (i = 0; i < tab->count; i++) |
| 2657 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
| 2658 | &data->vddc_leakage);
| 2659 | |
| 2660 | return 0; |
| 2661 | } |
| 2662 | |
| 2663 | static int smu7_patch_vddci(struct pp_hwmgr *hwmgr, |
| 2664 | struct phm_clock_voltage_dependency_table *tab) |
| 2665 | { |
| 2666 | uint16_t i; |
| 2667 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2668 | |
| 2669 | if (tab) |
| 2670 | for (i = 0; i < tab->count; i++) |
| 2671 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
| 2672 | &data->vddci_leakage);
| 2673 | |
| 2674 | return 0; |
| 2675 | } |
| 2676 | |
| 2677 | static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr, |
| 2678 | struct phm_vce_clock_voltage_dependency_table *tab) |
| 2679 | { |
| 2680 | uint16_t i; |
| 2681 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2682 | |
| 2683 | if (tab) |
| 2684 | for (i = 0; i < tab->count; i++) |
| 2685 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
| 2686 | &data->vddc_leakage);
| 2687 | |
| 2688 | return 0; |
| 2689 | } |
| 2690 | |
| 2691 | |
| 2692 | static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr, |
| 2693 | struct phm_uvd_clock_voltage_dependency_table *tab) |
| 2694 | { |
| 2695 | uint16_t i; |
| 2696 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2697 | |
| 2698 | if (tab) |
| 2699 | for (i = 0; i < tab->count; i++) |
| 2700 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
| 2701 | &data->vddc_leakage);
| 2702 | |
| 2703 | return 0; |
| 2704 | } |
| 2705 | |
| 2706 | static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr, |
| 2707 | struct phm_phase_shedding_limits_table *tab) |
| 2708 | { |
| 2709 | uint16_t i; |
| 2710 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2711 | |
| 2712 | if (tab) |
| 2713 | for (i = 0; i < tab->count; i++) |
| 2714 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
| 2715 | &data->vddc_leakage);
| 2716 | |
| 2717 | return 0; |
| 2718 | } |
| 2719 | |
| 2720 | static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr, |
| 2721 | struct phm_samu_clock_voltage_dependency_table *tab) |
| 2722 | { |
| 2723 | uint16_t i; |
| 2724 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2725 | |
| 2726 | if (tab) |
| 2727 | for (i = 0; i < tab->count; i++) |
| 2728 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
| 2729 | &data->vddc_leakage);
| 2730 | |
| 2731 | return 0; |
| 2732 | } |
| 2733 | |
| 2734 | static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr, |
| 2735 | struct phm_acp_clock_voltage_dependency_table *tab) |
| 2736 | { |
| 2737 | uint16_t i; |
| 2738 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2739 | |
| 2740 | if (tab) |
| 2741 | for (i = 0; i < tab->count; i++) |
| 2742 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
| 2743 | &data->vddc_leakage);
| 2744 | |
| 2745 | return 0; |
| 2746 | } |
| 2747 | |
| 2748 | static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, |
| 2749 | struct phm_clock_and_voltage_limits *tab) |
| 2750 | { |
| 2751 | uint32_t vddc, vddci; |
| 2752 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2753 | |
| 2754 | if (tab) { |
| 2755 | vddc = tab->vddc; |
| 2756 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
| 2757 | &data->vddc_leakage);
| 2758 | tab->vddc = vddc; |
| 2759 | vddci = tab->vddci; |
| 2760 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
| 2761 | &data->vddci_leakage);
| 2762 | tab->vddci = vddci; |
| 2763 | } |
| 2764 | |
| 2765 | return 0; |
| 2766 | } |
| 2767 | |
| 2768 | static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab) |
| 2769 | { |
| 2770 | uint32_t i; |
| 2771 | uint32_t vddc; |
| 2772 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2773 | |
| 2774 | if (tab) { |
| 2775 | for (i = 0; i < tab->count; i++) { |
| 2776 | vddc = (uint32_t)(tab->entries[i].Vddc); |
| 2777 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
| 2778 | tab->entries[i].Vddc = (uint16_t)vddc; |
| 2779 | } |
| 2780 | } |
| 2781 | |
| 2782 | return 0; |
| 2783 | } |
| 2784 | |
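| | /**
| |  * smu7_patch_dependency_tables_with_leakage - Patch all v0 dependency tables.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  *
| |  * Replaces virtual leakage voltage IDs in every clock/voltage dependency
| |  * table in hwmgr->dyn_state with the actual voltages recorded in the backend.
| |  */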
| 2785 | static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr) |
| 2786 | { |
| 2787 | int tmp; |
| 2788 | |
| 2789 | tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
| 2790 | if (tmp) |
| 2791 | return -EINVAL; |
| 2792 | |
| 2793 | tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
| 2794 | if (tmp) |
| 2795 | return -EINVAL; |
| 2796 | |
| 2797 | tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
| 2798 | if (tmp) |
| 2799 | return -EINVAL; |
| 2800 | |
| 2801 | tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
| 2802 | if (tmp) |
| 2803 | return -EINVAL; |
| 2804 | |
| 2805 | tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
| 2806 | if (tmp) |
| 2807 | return -EINVAL; |
| 2808 | |
| 2809 | tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
| 2810 | if (tmp) |
| 2811 | return -EINVAL; |
| 2812 | |
| 2813 | tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
| 2814 | if (tmp) |
| 2815 | return -EINVAL; |
| 2816 | |
| 2817 | tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
| 2818 | if (tmp) |
| 2819 | return -EINVAL; |
| 2820 | |
| 2821 | tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
| 2822 | if (tmp) |
| 2823 | return -EINVAL; |
| 2824 | |
| 2825 | tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
| 2826 | if (tmp) |
| 2827 | return -EINVAL; |
| 2828 | |
| 2829 | tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
| 2830 | if (tmp) |
| 2831 | return -EINVAL; |
| 2832 | |
| 2833 | tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
| 2834 | if (tmp) |
| 2835 | return -EINVAL; |
| 2836 | |
| 2837 | return 0; |
| 2838 | } |
| 2839 | |
| 2840 | |
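| | /**
| |  * smu7_set_private_data_based_on_pptable_v0 - Cache limits from the v0 tables.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  *
| |  * Records the min/max VDDC (and, if present, VDDCI) from the dependency
| |  * tables and fills in the maximum AC SCLK/MCLK/voltage limits.
| |  */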
| 2841 | static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) |
| 2842 | { |
| 2843 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2844 | |
| 2845 | struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk; |
| 2846 | struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk; |
| 2847 | struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk; |
| 2848 | |
| 2849 | PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
| 2850 | "VDDC dependency on SCLK table is missing. This table is mandatory",
| 2851 | return -EINVAL);
| 2852 | PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
| 2853 | "VDDC dependency on SCLK table is empty. This table is mandatory",
| 2854 | return -EINVAL);
| 2855 | 
| 2856 | PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
| 2857 | "VDDC dependency on MCLK table is missing. This table is mandatory",
| 2858 | return -EINVAL);
| 2859 | PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
| 2860 | "VDDC dependency on MCLK table is empty. This table is mandatory",
| 2861 | return -EINVAL);
| 2862 | |
| 2863 | data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v; |
| 2864 | data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; |
| 2865 | |
| 2866 | hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = |
| 2867 | allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; |
| 2868 | hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = |
| 2869 | allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk; |
| 2870 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = |
| 2871 | allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; |
| 2872 | |
| 2873 | if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) { |
| 2874 | data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v; |
| 2875 | data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; |
| 2876 | } |
| 2877 | |
| 2878 | if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1) |
| 2879 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; |
| 2880 | |
| 2881 | return 0; |
| 2882 | } |
| 2883 | |
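| | /**
| |  * smu7_hwmgr_backend_fini - Tear down the SMU7 hwmgr backend.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  *
| |  * Frees the DAL power level dependency table and the backend private data.
| |  */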
| 2884 | static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) |
| 2885 | { |
| 2886 | kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
| 2887 | hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
| 2888 | kfree(hwmgr->backend);
| 2889 | hwmgr->backend = NULL; |
| 2890 | |
| 2891 | return 0; |
| 2892 | } |
| 2893 | |
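| | /**
| |  * smu7_get_elb_voltages - Gather leakage voltages from the efuse.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  *
| |  * Reads the leakage ID from the efuse and records, for each virtual voltage
| |  * ID, the actual VDDC/VDDCI leakage voltages reported by the VBIOS.
| |  */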
| 2894 | static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr) |
| 2895 | { |
| 2896 | uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id; |
| 2897 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2898 | int i; |
| 2899 | |
| 2900 | if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
| 2901 | for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { |
| 2902 | virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; |
| 2903 | if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
| 2904 | virtual_voltage_id, |
| 2905 | efuse_voltage_id) == 0) { |
| 2906 | if (vddc != 0 && vddc != virtual_voltage_id) { |
| 2907 | data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; |
| 2908 | data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; |
| 2909 | data->vddc_leakage.count++; |
| 2910 | } |
| 2911 | if (vddci != 0 && vddci != virtual_voltage_id) { |
| 2912 | data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci; |
| 2913 | data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id; |
| 2914 | data->vddci_leakage.count++; |
| 2915 | } |
| 2916 | } |
| 2917 | } |
| 2918 | } |
| 2919 | return 0; |
| 2920 | } |
| 2921 | |
| 2922 | #define LEAKAGE_ID_MSB 463 |
| 2923 | #define LEAKAGE_ID_LSB 454 |
| 2924 | |
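| | /**
| |  * smu7_update_edc_leakage_table - Load the EDC/DIDT leakage table from the VBIOS.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  *
| |  * Selects the high- or low-leakage table offset based on the fused leakage
| |  * value and reads the corresponding EDC leakage table into the backend.
| |  */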
| 2925 | static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr) |
| 2926 | { |
| 2927 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2928 | uint32_t efuse; |
| 2929 | uint16_t offset; |
| 2930 | int ret = 0; |
| 2931 | |
| 2932 | if (data->disable_edc_leakage_controller) |
| 2933 | return 0; |
| 2934 | |
| 2935 | ret = atomctrl_get_edc_hilo_leakage_offset_table(hwmgr, |
| 2936 | &data->edc_hilo_leakage_offset_from_vbios);
| 2937 | if (ret) |
| 2938 | return ret; |
| 2939 | |
| 2940 | if (data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset && |
| 2941 | data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) { |
| 2942 | atomctrl_read_efuse(hwmgr, LEAKAGE_ID_LSB, LEAKAGE_ID_MSB, &efuse);
| 2943 | if (efuse < data->edc_hilo_leakage_offset_from_vbios.usHiLoLeakageThreshold) |
| 2944 | offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset; |
| 2945 | else |
| 2946 | offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset; |
| 2947 | |
| 2948 | ret = atomctrl_get_edc_leakage_table(hwmgr, |
| 2949 | &data->edc_leakage_table,
| 2950 | offset); |
| 2951 | if (ret) |
| 2952 | return ret; |
| 2953 | } |
| 2954 | |
| 2955 | return ret; |
| 2956 | } |
| 2957 | |
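| | /**
| |  * smu7_hwmgr_backend_init - Initialize the SMU7 hwmgr backend.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  *
| |  * Allocates the backend, collects EVV/ELB leakage voltages, patches the
| |  * powerplay tables and sets up DPM defaults, thermal parameters and PCIe caps.
| |  */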
| 2958 | static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) |
| 2959 | { |
| 2960 | struct amdgpu_device *adev = hwmgr->adev; |
| 2961 | struct smu7_hwmgr *data; |
| 2962 | int result = 0; |
| 2963 | |
| 2964 | data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL); |
| 2965 | if (data == NULL) |
| 2966 | return -ENOMEM; |
| 2967 | |
| 2968 | hwmgr->backend = data; |
| 2969 | smu7_patch_voltage_workaround(hwmgr); |
| 2970 | smu7_init_dpm_defaults(hwmgr); |
| 2971 | |
| 2972 | /* Get leakage voltage based on leakage ID. */ |
| 2973 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
| 2974 | PHM_PlatformCaps_EVV)) {
| 2975 | result = smu7_get_evv_voltages(hwmgr); |
| 2976 | if (result) { |
| 2977 | pr_info("Get EVV Voltage Failed. Abort Driver loading!\n" ); |
| 2978 | kfree(objp: hwmgr->backend); |
| 2979 | hwmgr->backend = NULL; |
| 2980 | return -EINVAL; |
| 2981 | } |
| 2982 | } else { |
| 2983 | smu7_get_elb_voltages(hwmgr); |
| 2984 | } |
| 2985 | |
| 2986 | if (hwmgr->pp_table_version == PP_TABLE_V1) { |
| 2987 | smu7_complete_dependency_tables(hwmgr); |
| 2988 | smu7_set_private_data_based_on_pptable_v1(hwmgr); |
| 2989 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { |
| 2990 | smu7_patch_dependency_tables_with_leakage(hwmgr); |
| 2991 | smu7_set_private_data_based_on_pptable_v0(hwmgr); |
| 2992 | } |
| 2993 | |
| 2994 | /* Initialize Dynamic State Adjustment Rule Settings */
| 2995 | result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); |
| 2996 | |
| 2997 | if (result) |
| 2998 | goto fail; |
| 2999 | |
| 3000 | data->is_tlu_enabled = false; |
| 3001 | |
| 3002 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = |
| 3003 | SMU7_MAX_HARDWARE_POWERLEVELS; |
| 3004 | hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; |
| 3005 | hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; |
| 3006 | |
| 3007 | data->pcie_gen_cap = adev->pm.pcie_gen_mask; |
| 3008 | if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) |
| 3009 | data->pcie_spc_cap = 20; |
| 3010 | else |
| 3011 | data->pcie_spc_cap = 16; |
| 3012 | data->pcie_lane_cap = adev->pm.pcie_mlw_mask; |
| 3013 | |
| 3014 | hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ |
| 3015 | /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ |
| 3016 | hwmgr->platform_descriptor.clockStep.engineClock = 500; |
| 3017 | hwmgr->platform_descriptor.clockStep.memoryClock = 500; |
| 3018 | smu7_thermal_parameter_init(hwmgr); |
| 3019 | |
| 3020 | result = smu7_update_edc_leakage_table(hwmgr); |
| 3021 | if (result) |
| 3022 | goto fail; |
| 3023 | |
| 3024 | return 0; |
| 3025 | fail: |
| 3026 | smu7_hwmgr_backend_fini(hwmgr); |
| 3027 | return result; |
| 3028 | } |
| 3029 | |
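| | /**
| |  * smu7_force_dpm_highest - Force PCIe, SCLK and MCLK DPM to their highest levels.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  *
| |  * The highest level is taken from the corresponding dpm_level_enable_mask.
| |  */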
| 3030 | static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) |
| 3031 | { |
| 3032 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3033 | uint32_t level, tmp; |
| 3034 | |
| 3035 | if (!data->pcie_dpm_key_disabled) { |
| 3036 | if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { |
| 3037 | level = 0; |
| 3038 | tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; |
| 3039 | while (tmp >>= 1) |
| 3040 | level++; |
| 3041 | |
| 3042 | if (level) |
| 3043 | smum_send_msg_to_smc_with_parameter(hwmgr, |
| 3044 | PPSMC_MSG_PCIeDPM_ForceLevel, level,
| 3045 | NULL); |
| 3046 | } |
| 3047 | } |
| 3048 | |
| 3049 | if (!data->sclk_dpm_key_disabled) { |
| 3050 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { |
| 3051 | level = 0; |
| 3052 | tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; |
| 3053 | while (tmp >>= 1) |
| 3054 | level++; |
| 3055 | |
| 3056 | if (level) |
| 3057 | smum_send_msg_to_smc_with_parameter(hwmgr, |
| 3058 | PPSMC_MSG_SCLKDPM_SetEnabledMask, |
| 3059 | (1 << level),
| 3060 | NULL); |
| 3061 | } |
| 3062 | } |
| 3063 | |
| 3064 | if (!data->mclk_dpm_key_disabled) { |
| 3065 | if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { |
| 3066 | level = 0; |
| 3067 | tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; |
| 3068 | while (tmp >>= 1) |
| 3069 | level++; |
| 3070 | |
| 3071 | if (level) |
| 3072 | smum_send_msg_to_smc_with_parameter(hwmgr, |
| 3073 | PPSMC_MSG_MCLKDPM_SetEnabledMask, |
| 3074 | (1 << level),
| 3075 | NULL); |
| 3076 | } |
| 3077 | } |
| 3078 | |
| 3079 | return 0; |
| 3080 | } |
| 3081 | |
| 3082 | static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) |
| 3083 | { |
| 3084 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3085 | |
| 3086 | if (hwmgr->pp_table_version == PP_TABLE_V1) |
| 3087 | phm_apply_dal_min_voltage_request(hwmgr); |
| 3088 | /* TO DO for v0 iceland and Ci*/ |
| 3089 | |
| 3090 | if (!data->sclk_dpm_key_disabled) { |
| 3091 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) |
| 3092 | smum_send_msg_to_smc_with_parameter(hwmgr, |
| 3093 | PPSMC_MSG_SCLKDPM_SetEnabledMask, |
| 3094 | data->dpm_level_enable_mask.sclk_dpm_enable_mask,
| 3095 | NULL); |
| 3096 | } |
| 3097 | |
| 3098 | if (!data->mclk_dpm_key_disabled) { |
| 3099 | if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) |
| 3100 | smum_send_msg_to_smc_with_parameter(hwmgr, |
| 3101 | PPSMC_MSG_MCLKDPM_SetEnabledMask, |
| 3102 | data->dpm_level_enable_mask.mclk_dpm_enable_mask,
| 3103 | NULL); |
| 3104 | } |
| 3105 | |
| 3106 | return 0; |
| 3107 | } |
| 3108 | |
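| | /**
| |  * smu7_unforce_dpm_levels - Return DPM level selection to the SMC.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  *
| |  * Unforces the PCIe level and re-uploads the SCLK/MCLK enable masks so the
| |  * SMC resumes automatic level selection.
| |  */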
| 3109 | static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr) |
| 3110 | { |
| 3111 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3112 | |
| 3113 | if (!smum_is_dpm_running(hwmgr)) |
| 3114 | return -EINVAL; |
| 3115 | |
| 3116 | if (!data->pcie_dpm_key_disabled) { |
| 3117 | smum_send_msg_to_smc(hwmgr, |
| 3118 | PPSMC_MSG_PCIeDPM_UnForceLevel, |
| 3119 | NULL); |
| 3120 | } |
| 3121 | |
| 3122 | return smu7_upload_dpm_level_enable_mask(hwmgr); |
| 3123 | } |
| 3124 | |
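| | /**
| |  * smu7_force_dpm_lowest - Force SCLK, MCLK and PCIe DPM to their lowest levels.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  *
| |  * The lowest level is the lowest bit set in each dpm_level_enable_mask.
| |  */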
| 3125 | static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) |
| 3126 | { |
| 3127 | struct smu7_hwmgr *data = |
| 3128 | (struct smu7_hwmgr *)(hwmgr->backend); |
| 3129 | uint32_t level; |
| 3130 | |
| 3131 | if (!data->sclk_dpm_key_disabled) |
| 3132 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { |
| 3133 | level = phm_get_lowest_enabled_level(hwmgr, |
| 3134 | data->dpm_level_enable_mask.sclk_dpm_enable_mask);
| 3135 | smum_send_msg_to_smc_with_parameter(hwmgr,
| 3136 | PPSMC_MSG_SCLKDPM_SetEnabledMask,
| 3137 | (1 << level),
| 3138 | NULL); |
| 3139 | |
| 3140 | } |
| 3141 | |
| 3142 | if (!data->mclk_dpm_key_disabled) { |
| 3143 | if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { |
| 3144 | level = phm_get_lowest_enabled_level(hwmgr, |
| 3145 | data->dpm_level_enable_mask.mclk_dpm_enable_mask);
| 3146 | smum_send_msg_to_smc_with_parameter(hwmgr,
| 3147 | PPSMC_MSG_MCLKDPM_SetEnabledMask,
| 3148 | (1 << level),
| 3149 | NULL); |
| 3150 | } |
| 3151 | } |
| 3152 | |
| 3153 | if (!data->pcie_dpm_key_disabled) { |
| 3154 | if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { |
| 3155 | level = phm_get_lowest_enabled_level(hwmgr, |
| 3156 | data->dpm_level_enable_mask.pcie_dpm_enable_mask);
| 3157 | smum_send_msg_to_smc_with_parameter(hwmgr,
| 3158 | PPSMC_MSG_PCIeDPM_ForceLevel,
| 3159 | (level),
| 3160 | NULL); |
| 3161 | } |
| 3162 | } |
| 3163 | |
| 3164 | return 0; |
| 3165 | } |
| 3166 | |
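| | /**
| |  * smu7_get_profiling_clk - Pick the clock masks for the profiling forced levels.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  * @level: the requested forced level (standard, min SCLK, min MCLK or peak)
| |  * @sclk_mask: returned SCLK level mask
| |  * @mclk_mask: returned MCLK level mask
| |  * @pcie_mask: returned PCIe level mask
| |  */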
| 3167 | static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, |
| 3168 | uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask) |
| 3169 | { |
| 3170 | uint32_t percentage; |
| 3171 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3172 | struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; |
| 3173 | int32_t tmp_mclk; |
| 3174 | int32_t tmp_sclk; |
| 3175 | int32_t count; |
| 3176 | |
| 3177 | if (golden_dpm_table->mclk_table.count < 1) |
| 3178 | return -EINVAL; |
| 3179 | |
| 3180 | percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value / |
| 3181 | golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; |
| 3182 | |
| 3183 | if (golden_dpm_table->mclk_table.count == 1) { |
| 3184 | percentage = 70; |
| 3185 | tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; |
| 3186 | *mclk_mask = golden_dpm_table->mclk_table.count - 1; |
| 3187 | } else { |
| 3188 | tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value; |
| 3189 | *mclk_mask = golden_dpm_table->mclk_table.count - 2; |
| 3190 | } |
| 3191 | |
| 3192 | tmp_sclk = tmp_mclk * percentage / 100; |
| 3193 | |
| 3194 | if (hwmgr->pp_table_version == PP_TABLE_V0) { |
| 3195 | for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; |
| 3196 | count >= 0; count--) { |
| 3197 | if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { |
| 3198 | *sclk_mask = count; |
| 3199 | break; |
| 3200 | } |
| 3201 | } |
| 3202 | if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) |
| 3203 | *sclk_mask = 0; |
| 3204 | |
| 3205 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
| 3206 | *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; |
| 3207 | } else if (hwmgr->pp_table_version == PP_TABLE_V1) { |
| 3208 | struct phm_ppt_v1_information *table_info = |
| 3209 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 3210 | |
| 3211 | for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) { |
| 3212 | if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) { |
| 3213 | *sclk_mask = count; |
| 3214 | break; |
| 3215 | } |
| 3216 | } |
| 3217 | if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) |
| 3218 | *sclk_mask = 0; |
| 3219 | |
| 3220 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
| 3221 | *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; |
| 3222 | } |
| 3223 | |
| 3224 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) |
| 3225 | *mclk_mask = 0; |
| 3226 | else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
| 3227 | *mclk_mask = golden_dpm_table->mclk_table.count - 1; |
| 3228 | |
| 3229 | *pcie_mask = data->dpm_table.pcie_speed_table.count - 1; |
| 3230 | |
| 3231 | return 0; |
| 3232 | } |
| 3233 | |
| 3234 | static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, |
| 3235 | enum amd_dpm_forced_level level) |
| 3236 | { |
| 3237 | int ret = 0; |
| 3238 | uint32_t sclk_mask = 0; |
| 3239 | uint32_t mclk_mask = 0; |
| 3240 | uint32_t pcie_mask = 0; |
| 3241 | |
| 3242 | switch (level) { |
| 3243 | case AMD_DPM_FORCED_LEVEL_HIGH: |
| 3244 | ret = smu7_force_dpm_highest(hwmgr); |
| 3245 | break; |
| 3246 | case AMD_DPM_FORCED_LEVEL_LOW: |
| 3247 | ret = smu7_force_dpm_lowest(hwmgr); |
| 3248 | break; |
| 3249 | case AMD_DPM_FORCED_LEVEL_AUTO: |
| 3250 | ret = smu7_unforce_dpm_levels(hwmgr); |
| 3251 | break; |
| 3252 | case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: |
| 3253 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: |
| 3254 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: |
| 3255 | case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: |
| 3256 | ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
| 3257 | if (ret)
| 3258 | return ret;
| 3259 | smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
| 3260 | smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
| 3261 | smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
| 3262 | break; |
| 3263 | case AMD_DPM_FORCED_LEVEL_MANUAL: |
| 3264 | case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: |
| 3265 | default: |
| 3266 | break; |
| 3267 | } |
| 3268 | |
| 3269 | if (!ret) { |
| 3270 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
| 3271 | smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
| 3272 | else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
| 3273 | smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); |
| 3274 | } |
| 3275 | return ret; |
| 3276 | } |
| 3277 | |
| 3278 | static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) |
| 3279 | { |
| 3280 | return sizeof(struct smu7_power_state); |
| 3281 | } |
| 3282 | |
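| | /**
| |  * smu7_vblank_too_short - Check whether vblank can hide an MCLK switch.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  * @vblank_time_us: the vblank time of the current display configuration
| |  *
| |  * Returns true if the vblank is shorter than the switch limit for this ASIC
| |  * and memory type, i.e. an MCLK switch would be visible.
| |  */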
| 3283 | static bool smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
| 3284 | uint32_t vblank_time_us) |
| 3285 | { |
| 3286 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3287 | uint32_t switch_limit_us; |
| 3288 | |
| 3289 | switch (hwmgr->chip_id) { |
| 3290 | case CHIP_POLARIS10: |
| 3291 | case CHIP_POLARIS11: |
| 3292 | case CHIP_POLARIS12: |
| 3293 | if (hwmgr->is_kicker || (hwmgr->chip_id == CHIP_POLARIS12)) |
| 3294 | switch_limit_us = data->is_memory_gddr5 ? 450 : 150; |
| 3295 | else |
| 3296 | switch_limit_us = data->is_memory_gddr5 ? 200 : 150; |
| 3297 | break; |
| 3298 | case CHIP_VEGAM: |
| 3299 | switch_limit_us = 30; |
| 3300 | break; |
| 3301 | default: |
| 3302 | switch_limit_us = data->is_memory_gddr5 ? 450 : 150; |
| 3303 | break; |
| 3304 | } |
| 3305 | |
| 3306 | if (vblank_time_us < switch_limit_us) |
| 3307 | return true; |
| 3308 | else |
| 3309 | return false; |
| 3310 | } |
| 3311 | |
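| | /**
| |  * smu7_apply_state_adjust_rules - Adjust a requested power state before it is set.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  * @request_ps: the requested power state
| |  * @current_ps: the currently active power state
| |  *
| |  * Clamps the requested clocks to the AC/DC limits, honors the stable-pstate
| |  * and minimum display clocks, and decides whether MCLK switching must be
| |  * disabled or its signal ignored for the current display configuration.
| |  */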
| 3312 | static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, |
| 3313 | struct pp_power_state *request_ps, |
| 3314 | const struct pp_power_state *current_ps) |
| 3315 | { |
| 3316 | struct amdgpu_device *adev = hwmgr->adev; |
| 3317 | struct smu7_power_state *smu7_ps; |
| 3318 | uint32_t sclk; |
| 3319 | uint32_t mclk; |
| 3320 | struct PP_Clocks minimum_clocks = {0}; |
| 3321 | bool disable_mclk_switching; |
| 3322 | bool disable_mclk_switching_for_frame_lock; |
| 3323 | bool disable_mclk_switching_for_display; |
| 3324 | const struct phm_clock_and_voltage_limits *max_limits; |
| 3325 | uint32_t i; |
| 3326 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3327 | struct phm_ppt_v1_information *table_info = |
| 3328 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 3329 | int32_t count; |
| 3330 | int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; |
| 3331 | uint32_t latency; |
| 3332 | bool latency_allowed = false; |
| 3333 | |
| 3334 | smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware);
| 3335 | if (!smu7_ps) |
| 3336 | return -EINVAL; |
| 3337 | |
| 3338 | data->battery_state = (PP_StateUILabel_Battery == |
| 3339 | request_ps->classification.ui_label); |
| 3340 | data->mclk_ignore_signal = false; |
| 3341 | |
| 3342 | max_limits = adev->pm.ac_power ? |
| 3343 | &(hwmgr->dyn_state.max_clock_voltage_on_ac) : |
| 3344 | &(hwmgr->dyn_state.max_clock_voltage_on_dc); |
| 3345 | |
| 3346 | /* Cap clock DPM tables at DC MAX if it is in DC. */ |
| 3347 | if (!adev->pm.ac_power) { |
| 3348 | for (i = 0; i < smu7_ps->performance_level_count; i++) { |
| 3349 | if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk) |
| 3350 | smu7_ps->performance_levels[i].memory_clock = max_limits->mclk; |
| 3351 | if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk) |
| 3352 | smu7_ps->performance_levels[i].engine_clock = max_limits->sclk; |
| 3353 | } |
| 3354 | } |
| 3355 | |
| 3356 | minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock; |
| 3357 | minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; |
| 3358 | |
| 3359 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
| 3360 | PHM_PlatformCaps_StablePState)) {
| 3361 | max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); |
| 3362 | stable_pstate_sclk = (max_limits->sclk * 75) / 100; |
| 3363 | |
| 3364 | for (count = table_info->vdd_dep_on_sclk->count - 1; |
| 3365 | count >= 0; count--) { |
| 3366 | if (stable_pstate_sclk >= |
| 3367 | table_info->vdd_dep_on_sclk->entries[count].clk) { |
| 3368 | stable_pstate_sclk = |
| 3369 | table_info->vdd_dep_on_sclk->entries[count].clk; |
| 3370 | break; |
| 3371 | } |
| 3372 | } |
| 3373 | |
| 3374 | if (count < 0) |
| 3375 | stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; |
| 3376 | |
| 3377 | stable_pstate_mclk = max_limits->mclk; |
| 3378 | |
| 3379 | minimum_clocks.engineClock = stable_pstate_sclk; |
| 3380 | minimum_clocks.memoryClock = stable_pstate_mclk; |
| 3381 | } |
| 3382 | |
| 3383 | disable_mclk_switching_for_frame_lock = phm_cap_enabled( |
| 3384 | hwmgr->platform_descriptor.platformCaps,
| 3385 | PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
| 3386 | |
| 3387 | disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) && |
| 3388 | !hwmgr->display_config->multi_monitor_in_sync) || |
| 3389 | (hwmgr->display_config->num_display && |
| 3390 | smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
| 3391 | |
| 3392 | disable_mclk_switching = disable_mclk_switching_for_frame_lock || |
| 3393 | disable_mclk_switching_for_display; |
| 3394 | |
| 3395 | if (hwmgr->display_config->num_display == 0) { |
| 3396 | if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM) |
| 3397 | data->mclk_ignore_signal = true; |
| 3398 | else |
| 3399 | disable_mclk_switching = false; |
| 3400 | } |
| 3401 | |
| 3402 | sclk = smu7_ps->performance_levels[0].engine_clock; |
| 3403 | mclk = smu7_ps->performance_levels[0].memory_clock; |
| 3404 | |
| 3405 | if (disable_mclk_switching && |
| 3406 | (!(hwmgr->chip_id >= CHIP_POLARIS10 && |
| 3407 | hwmgr->chip_id <= CHIP_VEGAM))) |
| 3408 | mclk = smu7_ps->performance_levels |
| 3409 | [smu7_ps->performance_level_count - 1].memory_clock; |
| 3410 | |
| 3411 | if (sclk < minimum_clocks.engineClock) |
| 3412 | sclk = (minimum_clocks.engineClock > max_limits->sclk) ? |
| 3413 | max_limits->sclk : minimum_clocks.engineClock; |
| 3414 | |
| 3415 | if (mclk < minimum_clocks.memoryClock) |
| 3416 | mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? |
| 3417 | max_limits->mclk : minimum_clocks.memoryClock; |
| 3418 | |
| 3419 | smu7_ps->performance_levels[0].engine_clock = sclk; |
| 3420 | smu7_ps->performance_levels[0].memory_clock = mclk; |
| 3421 | |
| 3422 | smu7_ps->performance_levels[1].engine_clock = |
| 3423 | (smu7_ps->performance_levels[1].engine_clock >= |
| 3424 | smu7_ps->performance_levels[0].engine_clock) ? |
| 3425 | smu7_ps->performance_levels[1].engine_clock : |
| 3426 | smu7_ps->performance_levels[0].engine_clock; |
| 3427 | |
| 3428 | if (disable_mclk_switching) { |
| 3429 | if (mclk < smu7_ps->performance_levels[1].memory_clock) |
| 3430 | mclk = smu7_ps->performance_levels[1].memory_clock; |
| 3431 | |
| 3432 | if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM) { |
| 3433 | if (disable_mclk_switching_for_display) { |
| 3434 | /* Find the lowest MCLK frequency that is within |
| 3435 | * the tolerable latency defined in DAL |
| 3436 | */ |
| 3437 | latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency; |
| 3438 | for (i = 0; i < data->mclk_latency_table.count; i++) { |
| 3439 | if (data->mclk_latency_table.entries[i].latency <= latency) { |
| 3440 | latency_allowed = true; |
| 3441 | |
| 3442 | if ((data->mclk_latency_table.entries[i].frequency >= |
| 3443 | smu7_ps->performance_levels[0].memory_clock) && |
| 3444 | (data->mclk_latency_table.entries[i].frequency <= |
| 3445 | smu7_ps->performance_levels[1].memory_clock)) { |
| 3446 | mclk = data->mclk_latency_table.entries[i].frequency; |
| 3447 | break; |
| 3448 | } |
| 3449 | } |
| 3450 | } |
| 3451 | if ((i >= data->mclk_latency_table.count - 1) && !latency_allowed) { |
| 3452 | data->mclk_ignore_signal = true; |
| 3453 | } else { |
| 3454 | data->mclk_ignore_signal = false; |
| 3455 | } |
| 3456 | } |
| 3457 | |
| 3458 | if (disable_mclk_switching_for_frame_lock) |
| 3459 | mclk = smu7_ps->performance_levels[1].memory_clock; |
| 3460 | } |
| 3461 | |
| 3462 | smu7_ps->performance_levels[0].memory_clock = mclk; |
| 3463 | |
| 3464 | if (!(hwmgr->chip_id >= CHIP_POLARIS10 && |
| 3465 | hwmgr->chip_id <= CHIP_VEGAM)) |
| 3466 | smu7_ps->performance_levels[1].memory_clock = mclk; |
| 3467 | } else { |
| 3468 | if (smu7_ps->performance_levels[1].memory_clock < |
| 3469 | smu7_ps->performance_levels[0].memory_clock) |
| 3470 | smu7_ps->performance_levels[1].memory_clock = |
| 3471 | smu7_ps->performance_levels[0].memory_clock; |
| 3472 | } |
| 3473 | |
| 3474 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
| 3475 | PHM_PlatformCaps_StablePState)) {
| 3476 | for (i = 0; i < smu7_ps->performance_level_count; i++) { |
| 3477 | smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk; |
| 3478 | smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk; |
| 3479 | smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; |
| 3480 | smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; |
| 3481 | } |
| 3482 | } |
| 3483 | return 0; |
| 3484 | } |
| 3485 | |
| 3486 | |
| 3487 | static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) |
| 3488 | { |
| 3489 | struct pp_power_state *ps; |
| 3490 | struct smu7_power_state *smu7_ps; |
| 3491 | |
| 3492 | if (hwmgr == NULL) |
| 3493 | return -EINVAL; |
| 3494 | |
| 3495 | ps = hwmgr->request_ps; |
| 3496 | |
| 3497 | if (ps == NULL) |
| 3498 | return -EINVAL; |
| 3499 | |
| 3500 | smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
| 3501 | |
| 3502 | if (low) |
| 3503 | return smu7_ps->performance_levels[0].memory_clock; |
| 3504 | else |
| 3505 | return smu7_ps->performance_levels |
| 3506 | [smu7_ps->performance_level_count-1].memory_clock; |
| 3507 | } |
| 3508 | |
| 3509 | static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) |
| 3510 | { |
| 3511 | struct pp_power_state *ps; |
| 3512 | struct smu7_power_state *smu7_ps; |
| 3513 | |
| 3514 | if (hwmgr == NULL) |
| 3515 | return -EINVAL; |
| 3516 | |
| 3517 | ps = hwmgr->request_ps; |
| 3518 | |
| 3519 | if (ps == NULL) |
| 3520 | return -EINVAL; |
| 3521 | |
| 3522 | smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
| 3523 | |
| 3524 | if (low) |
| 3525 | return smu7_ps->performance_levels[0].engine_clock; |
| 3526 | else |
| 3527 | return smu7_ps->performance_levels |
| 3528 | [smu7_ps->performance_level_count-1].engine_clock; |
| 3529 | } |
| 3530 | |
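| | /**
| |  * smu7_dpm_patch_boot_state - Fill the boot state from the VBIOS firmware info.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  * @hw_ps: the hardware power state to patch
| |  *
| |  * Records the boot SCLK/MCLK/voltages and the current PCIe speed and lane
| |  * count, then copies them into the first performance level.
| |  */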
| 3531 | static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, |
| 3532 | struct pp_hw_power_state *hw_ps) |
| 3533 | { |
| 3534 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3535 | struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps; |
| 3536 | ATOM_FIRMWARE_INFO_V2_2 *fw_info; |
| 3537 | uint16_t size; |
| 3538 | uint8_t frev, crev; |
| 3539 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); |
| 3540 | |
| 3541 | /* First retrieve the Boot clocks and VDDC from the firmware info table. |
| 3542 | * We assume here that fw_info is unchanged if this call fails. |
| 3543 | */ |
| 3544 | fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
| 3545 | &size, &frev, &crev);
| 3546 | if (!fw_info) |
| 3547 | /* During a test, there is no firmware info table. */ |
| 3548 | return 0; |
| 3549 | |
| 3550 | /* Patch the state. */ |
| 3551 | data->vbios_boot_state.sclk_bootup_value = |
| 3552 | le32_to_cpu(fw_info->ulDefaultEngineClock); |
| 3553 | data->vbios_boot_state.mclk_bootup_value = |
| 3554 | le32_to_cpu(fw_info->ulDefaultMemoryClock); |
| 3555 | data->vbios_boot_state.mvdd_bootup_value = |
| 3556 | le16_to_cpu(fw_info->usBootUpMVDDCVoltage); |
| 3557 | data->vbios_boot_state.vddc_bootup_value = |
| 3558 | le16_to_cpu(fw_info->usBootUpVDDCVoltage); |
| 3559 | data->vbios_boot_state.vddci_bootup_value = |
| 3560 | le16_to_cpu(fw_info->usBootUpVDDCIVoltage); |
| 3561 | data->vbios_boot_state.pcie_gen_bootup_value = |
| 3562 | smu7_get_current_pcie_speed(hwmgr); |
| 3563 | |
| 3564 | data->vbios_boot_state.pcie_lane_bootup_value = |
| 3565 | (uint16_t)smu7_get_current_pcie_lane_number(hwmgr); |
| 3566 | |
| 3567 | /* set boot power state */ |
| 3568 | ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; |
| 3569 | ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; |
| 3570 | ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; |
| 3571 | ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; |
| 3572 | |
| 3573 | return 0; |
| 3574 | } |
| 3575 | |
| 3576 | static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr) |
| 3577 | { |
| 3578 | int result; |
| 3579 | unsigned long ret = 0; |
| 3580 | |
| 3581 | if (hwmgr->pp_table_version == PP_TABLE_V0) { |
| 3582 | result = pp_tables_get_num_of_entries(hwmgr, &ret);
| 3583 | return result ? 0 : ret; |
| 3584 | } else if (hwmgr->pp_table_version == PP_TABLE_V1) { |
| 3585 | result = get_number_of_powerplay_table_entries_v1_0(hwmgr); |
| 3586 | return result; |
| 3587 | } |
| 3588 | return 0; |
| 3589 | } |
| 3590 | |
| 3591 | static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr, |
| 3592 | void *state, struct pp_power_state *power_state, |
| 3593 | void *pp_table, uint32_t classification_flag) |
| 3594 | { |
| 3595 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3596 | struct smu7_power_state *smu7_power_state = |
| 3597 | (struct smu7_power_state *)(&(power_state->hardware)); |
| 3598 | struct smu7_performance_level *performance_level; |
| 3599 | ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; |
| 3600 | ATOM_Tonga_POWERPLAYTABLE *powerplay_table = |
| 3601 | (ATOM_Tonga_POWERPLAYTABLE *)pp_table; |
| 3602 | PPTable_Generic_SubTable_Header *sclk_dep_table = |
| 3603 | (PPTable_Generic_SubTable_Header *) |
| 3604 | (((unsigned long)powerplay_table) + |
| 3605 | le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); |
| 3606 | |
| 3607 | ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = |
| 3608 | (ATOM_Tonga_MCLK_Dependency_Table *) |
| 3609 | (((unsigned long)powerplay_table) + |
| 3610 | le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); |
| 3611 | |
| 3612 | /* The following fields are not initialized here: id orderedList allStatesList */ |
| 3613 | power_state->classification.ui_label = |
| 3614 | (le16_to_cpu(state_entry->usClassification) & |
| 3615 | ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> |
| 3616 | ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; |
| 3617 | power_state->classification.flags = classification_flag; |
| 3618 | /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ |
| 3619 | |
| 3620 | power_state->classification.temporary_state = false; |
| 3621 | power_state->classification.to_be_deleted = false; |
| 3622 | |
| 3623 | power_state->validation.disallowOnDC = |
| 3624 | (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & |
| 3625 | ATOM_Tonga_DISALLOW_ON_DC)); |
| 3626 | |
| 3627 | power_state->pcie.lanes = 0; |
| 3628 | |
| 3629 | power_state->display.disableFrameModulation = false; |
| 3630 | power_state->display.limitRefreshrate = false; |
| 3631 | power_state->display.enableVariBright = |
| 3632 | (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & |
| 3633 | ATOM_Tonga_ENABLE_VARIBRIGHT)); |
| 3634 | |
| 3635 | power_state->validation.supportedPowerLevels = 0; |
| 3636 | power_state->uvd_clocks.VCLK = 0; |
| 3637 | power_state->uvd_clocks.DCLK = 0; |
| 3638 | power_state->temperatures.min = 0; |
| 3639 | power_state->temperatures.max = 0; |
| 3640 | |
| 3641 | performance_level = &(smu7_power_state->performance_levels |
| 3642 | [smu7_power_state->performance_level_count++]); |
| 3643 | |
| 3644 | PP_ASSERT_WITH_CODE( |
| 3645 | (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), |
| 3646 | "Performance levels exceeds SMC limit!" , |
| 3647 | return -EINVAL); |
| 3648 | |
| 3649 | PP_ASSERT_WITH_CODE( |
| 3650 | (smu7_power_state->performance_level_count < |
| 3651 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), |
| 3652 | "Performance levels exceeds Driver limit!" , |
| 3653 | return -EINVAL); |
| 3654 | |
| 3655 | /* Performance levels are arranged from low to high. */ |
| 3656 | performance_level->memory_clock = mclk_dep_table->entries |
| 3657 | [state_entry->ucMemoryClockIndexLow].ulMclk; |
| 3658 | if (sclk_dep_table->ucRevId == 0) |
| 3659 | performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries |
| 3660 | [state_entry->ucEngineClockIndexLow].ulSclk; |
| 3661 | else if (sclk_dep_table->ucRevId == 1) |
| 3662 | performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries |
| 3663 | [state_entry->ucEngineClockIndexLow].ulSclk; |
| 3664 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
| 3665 | state_entry->ucPCIEGenLow);
| 3666 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
| 3667 | state_entry->ucPCIELaneLow);
| 3668 | |
| 3669 | performance_level = &(smu7_power_state->performance_levels |
| 3670 | [smu7_power_state->performance_level_count++]); |
| 3671 | performance_level->memory_clock = mclk_dep_table->entries |
| 3672 | [state_entry->ucMemoryClockIndexHigh].ulMclk; |
| 3673 | |
| 3674 | if (sclk_dep_table->ucRevId == 0) |
| 3675 | performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries |
| 3676 | [state_entry->ucEngineClockIndexHigh].ulSclk; |
| 3677 | else if (sclk_dep_table->ucRevId == 1) |
| 3678 | performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries |
| 3679 | [state_entry->ucEngineClockIndexHigh].ulSclk; |
| 3680 | |
| 3681 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
| 3682 | state_entry->ucPCIEGenHigh);
| 3683 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
| 3684 | state_entry->ucPCIELaneHigh);
| 3685 | |
| 3686 | return 0; |
| 3687 | } |
| 3688 | |
| 3689 | static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr, |
| 3690 | unsigned long entry_index, struct pp_power_state *state) |
| 3691 | { |
| 3692 | int result; |
| 3693 | struct smu7_power_state *ps; |
| 3694 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3695 | struct phm_ppt_v1_information *table_info = |
| 3696 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 3697 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = |
| 3698 | table_info->vdd_dep_on_mclk; |
| 3699 | |
| 3700 | state->hardware.magic = PHM_VIslands_Magic; |
| 3701 | |
| 3702 | ps = (struct smu7_power_state *)(&state->hardware); |
| 3703 | |
| 3704 | result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
| 3705 | smu7_get_pp_table_entry_callback_func_v1);
| 3706 | |
| 3707 | /* This is the earliest time we have all the dependency table and the VBIOS boot state |
| 3708 | * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state |
| 3709 | * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state |
| 3710 | */ |
| 3711 | if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { |
| 3712 | if (dep_mclk_table->entries[0].clk != |
| 3713 | data->vbios_boot_state.mclk_bootup_value) |
| 3714 | pr_debug("Single MCLK entry VDDCI/MCLK dependency table " |
| 3715 | "does not match VBIOS boot MCLK level" ); |
| 3716 | if (dep_mclk_table->entries[0].vddci != |
| 3717 | data->vbios_boot_state.vddci_bootup_value) |
| 3718 | pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " |
| 3719 | "does not match VBIOS boot VDDCI level" ); |
| 3720 | } |
| 3721 | |
| 3722 | /* set DC compatible flag if this state supports DC */ |
| 3723 | if (!state->validation.disallowOnDC) |
| 3724 | ps->dc_compatible = true; |
| 3725 | |
| 3726 | if (state->classification.flags & PP_StateClassificationFlag_ACPI) |
| 3727 | data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; |
| 3728 | |
| 3729 | ps->uvd_clks.vclk = state->uvd_clocks.VCLK; |
| 3730 | ps->uvd_clks.dclk = state->uvd_clocks.DCLK; |
| 3731 | |
| 3732 | if (!result) { |
| 3733 | uint32_t i; |
| 3734 | |
| 3735 | switch (state->classification.ui_label) { |
| 3736 | case PP_StateUILabel_Performance: |
| 3737 | data->use_pcie_performance_levels = true; |
| 3738 | for (i = 0; i < ps->performance_level_count; i++) { |
| 3739 | if (data->pcie_gen_performance.max < |
| 3740 | ps->performance_levels[i].pcie_gen) |
| 3741 | data->pcie_gen_performance.max = |
| 3742 | ps->performance_levels[i].pcie_gen; |
| 3743 | |
| 3744 | if (data->pcie_gen_performance.min > |
| 3745 | ps->performance_levels[i].pcie_gen) |
| 3746 | data->pcie_gen_performance.min = |
| 3747 | ps->performance_levels[i].pcie_gen; |
| 3748 | |
| 3749 | if (data->pcie_lane_performance.max < |
| 3750 | ps->performance_levels[i].pcie_lane) |
| 3751 | data->pcie_lane_performance.max = |
| 3752 | ps->performance_levels[i].pcie_lane; |
| 3753 | if (data->pcie_lane_performance.min > |
| 3754 | ps->performance_levels[i].pcie_lane) |
| 3755 | data->pcie_lane_performance.min = |
| 3756 | ps->performance_levels[i].pcie_lane; |
| 3757 | } |
| 3758 | break; |
| 3759 | case PP_StateUILabel_Battery: |
| 3760 | data->use_pcie_power_saving_levels = true; |
| 3761 | |
| 3762 | for (i = 0; i < ps->performance_level_count; i++) { |
| 3763 | if (data->pcie_gen_power_saving.max < |
| 3764 | ps->performance_levels[i].pcie_gen) |
| 3765 | data->pcie_gen_power_saving.max = |
| 3766 | ps->performance_levels[i].pcie_gen; |
| 3767 | |
| 3768 | if (data->pcie_gen_power_saving.min > |
| 3769 | ps->performance_levels[i].pcie_gen) |
| 3770 | data->pcie_gen_power_saving.min = |
| 3771 | ps->performance_levels[i].pcie_gen; |
| 3772 | |
| 3773 | if (data->pcie_lane_power_saving.max < |
| 3774 | ps->performance_levels[i].pcie_lane) |
| 3775 | data->pcie_lane_power_saving.max = |
| 3776 | ps->performance_levels[i].pcie_lane; |
| 3777 | |
| 3778 | if (data->pcie_lane_power_saving.min > |
| 3779 | ps->performance_levels[i].pcie_lane) |
| 3780 | data->pcie_lane_power_saving.min = |
| 3781 | ps->performance_levels[i].pcie_lane; |
| 3782 | } |
| 3783 | break; |
| 3784 | default: |
| 3785 | break; |
| 3786 | } |
| 3787 | } |
| 3788 | return 0; |
| 3789 | } |
| 3790 | |
| 3791 | static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr, |
| 3792 | struct pp_hw_power_state *power_state, |
| 3793 | unsigned int index, const void *clock_info) |
| 3794 | { |
| 3795 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3796 | struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state);
| 3797 | const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info; |
| 3798 | struct smu7_performance_level *performance_level; |
| 3799 | uint32_t engine_clock, memory_clock; |
| 3800 | uint16_t pcie_gen_from_bios; |
| 3801 | |
| 3802 | engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow; |
| 3803 | memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow; |
| 3804 | |
| 3805 | if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) |
| 3806 | data->highest_mclk = memory_clock; |
| 3807 | |
| 3808 | PP_ASSERT_WITH_CODE( |
| 3809 | (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), |
| 3810 | "Performance levels exceeds SMC limit!" , |
| 3811 | return -EINVAL); |
| 3812 | |
| 3813 | PP_ASSERT_WITH_CODE( |
| 3814 | (ps->performance_level_count < |
| 3815 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), |
| 3816 | "Performance levels exceeds Driver limit, Skip!" , |
| 3817 | return 0); |
| 3818 | |
| 3819 | performance_level = &(ps->performance_levels |
| 3820 | [ps->performance_level_count++]); |
| 3821 | |
| 3822 | /* Performance levels are arranged from low to high. */ |
| 3823 | performance_level->memory_clock = memory_clock; |
| 3824 | performance_level->engine_clock = engine_clock; |
| 3825 | |
| 3826 | pcie_gen_from_bios = visland_clk_info->ucPCIEGen; |
| 3827 | |
| 3828 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
| 3829 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
| 3830 | |
| 3831 | return 0; |
| 3832 | } |
| 3833 | |
| 3834 | static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr, |
| 3835 | unsigned long entry_index, struct pp_power_state *state) |
| 3836 | { |
| 3837 | int result; |
| 3838 | struct smu7_power_state *ps; |
| 3839 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3840 | struct phm_clock_voltage_dependency_table *dep_mclk_table = |
| 3841 | hwmgr->dyn_state.vddci_dependency_on_mclk; |
| 3842 | |
| 3843 | memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state)); |
| 3844 | |
| 3845 | state->hardware.magic = PHM_VIslands_Magic; |
| 3846 | |
| 3847 | ps = (struct smu7_power_state *)(&state->hardware); |
| 3848 | |
| 3849 | result = pp_tables_get_entry(hwmgr, entry_index, state,
| 3850 | smu7_get_pp_table_entry_callback_func_v0);
| 3851 | |
| 3852 | /* |
| 3853 | * This is the earliest time we have all the dependency table |
| 3854 | * and the VBIOS boot state as |
| 3855 | * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot |
| 3856 | * state if there is only one VDDCI/MCLK level, check if it's |
| 3857 | * the same as VBIOS boot state |
| 3858 | */ |
| 3859 | if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { |
| 3860 | if (dep_mclk_table->entries[0].clk != |
| 3861 | data->vbios_boot_state.mclk_bootup_value) |
| 3862 | pr_debug("Single MCLK entry VDDCI/MCLK dependency table " |
| 3863 | "does not match VBIOS boot MCLK level" ); |
| 3864 | if (dep_mclk_table->entries[0].v != |
| 3865 | data->vbios_boot_state.vddci_bootup_value) |
| 3866 | pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " |
| 3867 | "does not match VBIOS boot VDDCI level" ); |
| 3868 | } |
| 3869 | |
| 3870 | /* set DC compatible flag if this state supports DC */ |
| 3871 | if (!state->validation.disallowOnDC) |
| 3872 | ps->dc_compatible = true; |
| 3873 | |
| 3874 | if (state->classification.flags & PP_StateClassificationFlag_ACPI) |
| 3875 | data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; |
| 3876 | |
| 3877 | ps->uvd_clks.vclk = state->uvd_clocks.VCLK; |
| 3878 | ps->uvd_clks.dclk = state->uvd_clocks.DCLK; |
| 3879 | |
| 3880 | if (!result) { |
| 3881 | uint32_t i; |
| 3882 | |
| 3883 | switch (state->classification.ui_label) { |
| 3884 | case PP_StateUILabel_Performance: |
| 3885 | data->use_pcie_performance_levels = true; |
| 3886 | |
| 3887 | for (i = 0; i < ps->performance_level_count; i++) { |
| 3888 | if (data->pcie_gen_performance.max < |
| 3889 | ps->performance_levels[i].pcie_gen) |
| 3890 | data->pcie_gen_performance.max = |
| 3891 | ps->performance_levels[i].pcie_gen; |
| 3892 | |
| 3893 | if (data->pcie_gen_performance.min > |
| 3894 | ps->performance_levels[i].pcie_gen) |
| 3895 | data->pcie_gen_performance.min = |
| 3896 | ps->performance_levels[i].pcie_gen; |
| 3897 | |
| 3898 | if (data->pcie_lane_performance.max < |
| 3899 | ps->performance_levels[i].pcie_lane) |
| 3900 | data->pcie_lane_performance.max = |
| 3901 | ps->performance_levels[i].pcie_lane; |
| 3902 | |
| 3903 | if (data->pcie_lane_performance.min > |
| 3904 | ps->performance_levels[i].pcie_lane) |
| 3905 | data->pcie_lane_performance.min = |
| 3906 | ps->performance_levels[i].pcie_lane; |
| 3907 | } |
| 3908 | break; |
| 3909 | case PP_StateUILabel_Battery: |
| 3910 | data->use_pcie_power_saving_levels = true; |
| 3911 | |
| 3912 | for (i = 0; i < ps->performance_level_count; i++) { |
| 3913 | if (data->pcie_gen_power_saving.max < |
| 3914 | ps->performance_levels[i].pcie_gen) |
| 3915 | data->pcie_gen_power_saving.max = |
| 3916 | ps->performance_levels[i].pcie_gen; |
| 3917 | |
| 3918 | if (data->pcie_gen_power_saving.min > |
| 3919 | ps->performance_levels[i].pcie_gen) |
| 3920 | data->pcie_gen_power_saving.min = |
| 3921 | ps->performance_levels[i].pcie_gen; |
| 3922 | |
| 3923 | if (data->pcie_lane_power_saving.max < |
| 3924 | ps->performance_levels[i].pcie_lane) |
| 3925 | data->pcie_lane_power_saving.max = |
| 3926 | ps->performance_levels[i].pcie_lane; |
| 3927 | |
| 3928 | if (data->pcie_lane_power_saving.min > |
| 3929 | ps->performance_levels[i].pcie_lane) |
| 3930 | data->pcie_lane_power_saving.min = |
| 3931 | ps->performance_levels[i].pcie_lane; |
| 3932 | } |
| 3933 | break; |
| 3934 | default: |
| 3935 | break; |
| 3936 | } |
| 3937 | } |
| 3938 | return 0; |
| 3939 | } |
| 3940 | |
| 3941 | static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, |
| 3942 | unsigned long entry_index, struct pp_power_state *state) |
| 3943 | { |
| 3944 | if (hwmgr->pp_table_version == PP_TABLE_V0) |
| 3945 | return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state); |
| 3946 | else if (hwmgr->pp_table_version == PP_TABLE_V1) |
| 3947 | return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state); |
| 3948 | |
| 3949 | return 0; |
| 3950 | } |
| 3951 | |
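| | /**
| |  * smu7_get_gpu_power - Read the current package power from the SMC.
| |  *
| |  * @hwmgr: the address of the powerplay hardware manager.
| |  * @query: returned power value
| |  *
| |  * Uses PPSMC_MSG_GetCurrPkgPwr where available and falls back to sampling
| |  * the PM status log on ASICs that do not support it.
| |  */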
| 3952 | static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) |
| 3953 | { |
| 3954 | struct amdgpu_device *adev = hwmgr->adev; |
| 3955 | int i; |
| 3956 | u32 tmp = 0; |
| 3957 | |
| 3958 | if (!query) |
| 3959 | return -EINVAL; |
| 3960 | |
| 3961 | /* |
| 3962 | * PPSMC_MSG_GetCurrPkgPwr is not supported on: |
| 3963 | * - Hawaii |
| 3964 | * - Bonaire |
| 3965 | * - Fiji |
| 3966 | * - Tonga |
| 3967 | */ |
| 3968 | if ((adev->asic_type != CHIP_HAWAII) && |
| 3969 | (adev->asic_type != CHIP_BONAIRE) && |
| 3970 | (adev->asic_type != CHIP_FIJI) && |
| 3971 | (adev->asic_type != CHIP_TONGA)) { |
| 3972 | smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
| 3973 | *query = tmp; |
| 3974 | |
| 3975 | if (tmp != 0) |
| 3976 | return 0; |
| 3977 | } |
| 3978 | |
| 3979 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL); |
| 3980 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 3981 | ixSMU_PM_STATUS_95, 0); |
| 3982 | |
| 3983 | for (i = 0; i < 10; i++) { |
		msleep(500);
| 3985 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL); |
| 3986 | tmp = cgs_read_ind_register(hwmgr->device, |
| 3987 | CGS_IND_REG__SMC, |
| 3988 | ixSMU_PM_STATUS_95); |
| 3989 | if (tmp != 0) |
| 3990 | break; |
| 3991 | } |
| 3992 | *query = tmp; |
| 3993 | |
| 3994 | return 0; |
| 3995 | } |
| 3996 | |
| 3997 | static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, |
| 3998 | void *value, int *size) |
| 3999 | { |
| 4000 | uint32_t sclk, mclk, activity_percent; |
| 4001 | uint32_t offset, val_vid; |
| 4002 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4003 | struct amdgpu_device *adev = hwmgr->adev; |
| 4004 | int ret = 0; |
| 4005 | |
| 4006 | /* size must be at least 4 bytes for all sensors */ |
| 4007 | if (*size < 4) |
| 4008 | return -EINVAL; |
| 4009 | |
| 4010 | switch (idx) { |
| 4011 | case AMDGPU_PP_SENSOR_GFX_SCLK: |
		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
| 4013 | if (ret) |
| 4014 | return ret; |
| 4015 | *((uint32_t *)value) = sclk; |
| 4016 | *size = 4; |
| 4017 | return 0; |
| 4018 | case AMDGPU_PP_SENSOR_GFX_MCLK: |
		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
| 4020 | if (ret) |
| 4021 | return ret; |
| 4022 | *((uint32_t *)value) = mclk; |
| 4023 | *size = 4; |
| 4024 | return 0; |
| 4025 | case AMDGPU_PP_SENSOR_GPU_LOAD: |
| 4026 | case AMDGPU_PP_SENSOR_MEM_LOAD: |
		offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
								SMU_SoftRegisters,
								(idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
								AverageGraphicsActivity :
								AverageMemoryActivity);
| 4032 | |
| 4033 | activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); |
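		/* the SMU reports activity scaled by 256; add 0x80 to round before shifting down to an integer percentage */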
| 4034 | activity_percent += 0x80; |
| 4035 | activity_percent >>= 8; |
| 4036 | *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; |
| 4037 | *size = 4; |
| 4038 | return 0; |
| 4039 | case AMDGPU_PP_SENSOR_GPU_TEMP: |
| 4040 | *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr); |
| 4041 | *size = 4; |
| 4042 | return 0; |
| 4043 | case AMDGPU_PP_SENSOR_UVD_POWER: |
| 4044 | *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; |
| 4045 | *size = 4; |
| 4046 | return 0; |
| 4047 | case AMDGPU_PP_SENSOR_VCE_POWER: |
| 4048 | *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; |
| 4049 | *size = 4; |
| 4050 | return 0; |
| 4051 | case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: |
| 4052 | if ((adev->asic_type != CHIP_HAWAII) && |
| 4053 | (adev->asic_type != CHIP_BONAIRE) && |
| 4054 | (adev->asic_type != CHIP_FIJI) && |
| 4055 | (adev->asic_type != CHIP_TONGA)) |
			return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
| 4057 | else |
| 4058 | return -EOPNOTSUPP; |
| 4059 | case AMDGPU_PP_SENSOR_GPU_AVG_POWER: |
| 4060 | if ((adev->asic_type != CHIP_HAWAII) && |
| 4061 | (adev->asic_type != CHIP_BONAIRE) && |
| 4062 | (adev->asic_type != CHIP_FIJI) && |
| 4063 | (adev->asic_type != CHIP_TONGA)) |
| 4064 | return -EOPNOTSUPP; |
| 4065 | else |
			return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
| 4067 | case AMDGPU_PP_SENSOR_VDDGFX: |
| 4068 | if ((data->vr_config & VRCONF_VDDGFX_MASK) == |
| 4069 | (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT)) |
| 4070 | val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, |
| 4071 | CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID); |
| 4072 | else |
| 4073 | val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, |
| 4074 | CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID); |
| 4075 | |
		*((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
| 4077 | return 0; |
| 4078 | default: |
| 4079 | return -EOPNOTSUPP; |
| 4080 | } |
| 4081 | } |
| 4082 | |
| 4083 | static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) |
| 4084 | { |
| 4085 | const struct phm_set_power_state_input *states = |
| 4086 | (const struct phm_set_power_state_input *)input; |
| 4087 | const struct smu7_power_state *smu7_ps = |
			cast_const_phw_smu7_power_state(states->pnew_state);
| 4089 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4090 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); |
| 4091 | uint32_t sclk = smu7_ps->performance_levels |
| 4092 | [smu7_ps->performance_level_count - 1].engine_clock; |
| 4093 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); |
| 4094 | uint32_t mclk = smu7_ps->performance_levels |
| 4095 | [smu7_ps->performance_level_count - 1].memory_clock; |
| 4096 | struct PP_Clocks min_clocks = {0}; |
| 4097 | uint32_t i; |
| 4098 | |
| 4099 | for (i = 0; i < sclk_table->count; i++) { |
| 4100 | if (sclk == sclk_table->dpm_levels[i].value) |
| 4101 | break; |
| 4102 | } |
| 4103 | |
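	/* requested SCLK is not an existing DPM level; if it exceeds the top level it comes from overdrive, so stretch the top level */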
| 4104 | if (i >= sclk_table->count) { |
| 4105 | if (sclk > sclk_table->dpm_levels[i-1].value) { |
| 4106 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; |
| 4107 | sclk_table->dpm_levels[i-1].value = sclk; |
| 4108 | } |
| 4109 | } else { |
| 4110 | /* TODO: Check SCLK in DAL's minimum clocks |
| 4111 | * in case DeepSleep divider update is required. |
| 4112 | */ |
| 4113 | if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR && |
| 4114 | (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK || |
| 4115 | data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) |
| 4116 | data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; |
| 4117 | } |
| 4118 | |
| 4119 | for (i = 0; i < mclk_table->count; i++) { |
| 4120 | if (mclk == mclk_table->dpm_levels[i].value) |
| 4121 | break; |
| 4122 | } |
| 4123 | |
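	/* same check for MCLK: an out-of-table request is treated as an overdrive update of the top level */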
| 4124 | if (i >= mclk_table->count) { |
| 4125 | if (mclk > mclk_table->dpm_levels[i-1].value) { |
| 4126 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; |
| 4127 | mclk_table->dpm_levels[i-1].value = mclk; |
| 4128 | } |
| 4129 | } |
| 4130 | |
| 4131 | if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) |
| 4132 | data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; |
| 4133 | |
| 4134 | return 0; |
| 4135 | } |
| 4136 | |
| 4137 | static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr, |
| 4138 | const struct smu7_power_state *smu7_ps) |
| 4139 | { |
| 4140 | uint32_t i; |
| 4141 | uint32_t sclk, max_sclk = 0; |
| 4142 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4143 | struct smu7_dpm_table *dpm_table = &data->dpm_table; |
| 4144 | |
| 4145 | for (i = 0; i < smu7_ps->performance_level_count; i++) { |
| 4146 | sclk = smu7_ps->performance_levels[i].engine_clock; |
| 4147 | if (max_sclk < sclk) |
| 4148 | max_sclk = sclk; |
| 4149 | } |
| 4150 | |
| 4151 | for (i = 0; i < dpm_table->sclk_table.count; i++) { |
| 4152 | if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) |
| 4153 | return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ? |
| 4154 | dpm_table->pcie_speed_table.dpm_levels |
| 4155 | [dpm_table->pcie_speed_table.count - 1].value : |
| 4156 | dpm_table->pcie_speed_table.dpm_levels[i].value); |
| 4157 | } |
| 4158 | |
| 4159 | return 0; |
| 4160 | } |
| 4161 | |
| 4162 | static int smu7_request_link_speed_change_before_state_change( |
| 4163 | struct pp_hwmgr *hwmgr, const void *input) |
| 4164 | { |
| 4165 | const struct phm_set_power_state_input *states = |
| 4166 | (const struct phm_set_power_state_input *)input; |
| 4167 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
	const struct smu7_power_state *smu7_nps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	const struct smu7_power_state *polaris10_cps =
			cast_const_phw_smu7_power_state(states->pcurrent_state);

	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
| 4174 | uint16_t current_link_speed; |
| 4175 | |
| 4176 | if (data->force_pcie_gen == PP_PCIEGenInvalid) |
		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
| 4178 | else |
| 4179 | current_link_speed = data->force_pcie_gen; |
| 4180 | |
| 4181 | data->force_pcie_gen = PP_PCIEGenInvalid; |
| 4182 | data->pspp_notify_required = false; |
| 4183 | |
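	/*
	 * Walk down from the requested PCIe gen: if the ACPI PSPP request
	 * for a gen is rejected, remember the fallback gen and fall through
	 * to try the next lower one.
	 */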
| 4184 | if (target_link_speed > current_link_speed) { |
| 4185 | switch (target_link_speed) { |
| 4186 | #ifdef CONFIG_ACPI |
| 4187 | case PP_PCIEGen3: |
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
| 4189 | break; |
| 4190 | data->force_pcie_gen = PP_PCIEGen2; |
| 4191 | if (current_link_speed == PP_PCIEGen2) |
| 4192 | break; |
| 4193 | fallthrough; |
| 4194 | case PP_PCIEGen2: |
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
| 4196 | break; |
| 4197 | fallthrough; |
| 4198 | #endif |
| 4199 | default: |
| 4200 | data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr); |
| 4201 | break; |
| 4202 | } |
| 4203 | } else { |
| 4204 | if (target_link_speed < current_link_speed) |
| 4205 | data->pspp_notify_required = true; |
| 4206 | } |
| 4207 | |
| 4208 | return 0; |
| 4209 | } |
| 4210 | |
| 4211 | static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) |
| 4212 | { |
| 4213 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4214 | |
| 4215 | if (0 == data->need_update_smu7_dpm_table) |
| 4216 | return 0; |
| 4217 | |
| 4218 | if ((0 == data->sclk_dpm_key_disabled) && |
| 4219 | (data->need_update_smu7_dpm_table & |
| 4220 | (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { |
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to freeze SCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_SCLKDPM_FreezeLevel,
				NULL),
				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
				return -EINVAL);
| 4229 | } |
| 4230 | |
| 4231 | if ((0 == data->mclk_dpm_key_disabled) && |
| 4232 | !data->mclk_ignore_signal && |
| 4233 | (data->need_update_smu7_dpm_table & |
| 4234 | DPMTABLE_OD_UPDATE_MCLK)) { |
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to freeze MCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MCLKDPM_FreezeLevel,
				NULL),
				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
				return -EINVAL);
| 4243 | } |
| 4244 | |
| 4245 | return 0; |
| 4246 | } |
| 4247 | |
| 4248 | static int smu7_populate_and_upload_sclk_mclk_dpm_levels( |
| 4249 | struct pp_hwmgr *hwmgr, const void *input) |
| 4250 | { |
| 4251 | int result = 0; |
| 4252 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4253 | struct smu7_dpm_table *dpm_table = &data->dpm_table; |
| 4254 | uint32_t count; |
| 4255 | struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); |
| 4256 | struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels); |
| 4257 | struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels); |
| 4258 | |
| 4259 | if (0 == data->need_update_smu7_dpm_table) |
| 4260 | return 0; |
| 4261 | |
| 4262 | if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { |
| 4263 | for (count = 0; count < dpm_table->sclk_table.count; count++) { |
| 4264 | dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled; |
| 4265 | dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock; |
| 4266 | } |
| 4267 | } |
| 4268 | |
| 4269 | if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { |
| 4270 | for (count = 0; count < dpm_table->mclk_table.count; count++) { |
| 4271 | dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled; |
| 4272 | dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock; |
| 4273 | } |
| 4274 | } |
| 4275 | |
| 4276 | if (data->need_update_smu7_dpm_table & |
| 4277 | (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) { |
| 4278 | result = smum_populate_all_graphic_levels(hwmgr); |
| 4279 | PP_ASSERT_WITH_CODE((0 == result), |
| 4280 | "Failed to populate SCLK during PopulateNewDPMClocksStates Function!" , |
| 4281 | return result); |
| 4282 | } |
| 4283 | |
| 4284 | if (data->need_update_smu7_dpm_table & |
| 4285 | (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) { |
| 4286 | /*populate MCLK dpm table to SMU7 */ |
| 4287 | result = smum_populate_all_memory_levels(hwmgr); |
| 4288 | PP_ASSERT_WITH_CODE((0 == result), |
| 4289 | "Failed to populate MCLK during PopulateNewDPMClocksStates Function!" , |
| 4290 | return result); |
| 4291 | } |
| 4292 | |
| 4293 | return result; |
| 4294 | } |
| 4295 | |
| 4296 | static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, |
| 4297 | struct smu7_single_dpm_table *dpm_table, |
| 4298 | uint32_t low_limit, uint32_t high_limit) |
| 4299 | { |
| 4300 | uint32_t i; |
| 4301 | |
| 4302 | /* force the trim if mclk_switching is disabled to prevent flicker */ |
| 4303 | bool force_trim = (low_limit == high_limit); |
| 4304 | for (i = 0; i < dpm_table->count; i++) { |
| 4305 | /*skip the trim if od is enabled*/ |
| 4306 | if ((!hwmgr->od_enabled || force_trim) |
| 4307 | && (dpm_table->dpm_levels[i].value < low_limit |
| 4308 | || dpm_table->dpm_levels[i].value > high_limit)) |
| 4309 | dpm_table->dpm_levels[i].enabled = false; |
| 4310 | else |
| 4311 | dpm_table->dpm_levels[i].enabled = true; |
| 4312 | } |
| 4313 | |
| 4314 | return 0; |
| 4315 | } |
| 4316 | |
| 4317 | static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr, |
| 4318 | const struct smu7_power_state *smu7_ps) |
| 4319 | { |
| 4320 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4321 | uint32_t high_limit_count; |
| 4322 | |
| 4323 | PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1), |
| 4324 | "power state did not have any performance level" , |
| 4325 | return -EINVAL); |
| 4326 | |
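	/* a single performance level makes the low and high limits identical, which forces the trim below */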
| 4327 | high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1; |
| 4328 | |
	smu7_trim_single_dpm_states(hwmgr,
			&(data->dpm_table.sclk_table),
			smu7_ps->performance_levels[0].engine_clock,
			smu7_ps->performance_levels[high_limit_count].engine_clock);

	smu7_trim_single_dpm_states(hwmgr,
			&(data->dpm_table.mclk_table),
			smu7_ps->performance_levels[0].memory_clock,
			smu7_ps->performance_levels[high_limit_count].memory_clock);
| 4338 | |
| 4339 | return 0; |
| 4340 | } |
| 4341 | |
| 4342 | static int smu7_generate_dpm_level_enable_mask( |
| 4343 | struct pp_hwmgr *hwmgr, const void *input) |
| 4344 | { |
| 4345 | int result = 0; |
| 4346 | const struct phm_set_power_state_input *states = |
| 4347 | (const struct phm_set_power_state_input *)input; |
| 4348 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4349 | const struct smu7_power_state *smu7_ps = |
			cast_const_phw_smu7_power_state(states->pnew_state);
| 4351 | |
| 4352 | |
| 4353 | result = smu7_trim_dpm_states(hwmgr, smu7_ps); |
| 4354 | if (result) |
| 4355 | return result; |
| 4356 | |
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
| 4363 | |
| 4364 | return 0; |
| 4365 | } |
| 4366 | |
| 4367 | static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) |
| 4368 | { |
| 4369 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4370 | |
| 4371 | if (0 == data->need_update_smu7_dpm_table) |
| 4372 | return 0; |
| 4373 | |
| 4374 | if ((0 == data->sclk_dpm_key_disabled) && |
| 4375 | (data->need_update_smu7_dpm_table & |
| 4376 | (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { |
| 4377 | |
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to Unfreeze SCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_SCLKDPM_UnfreezeLevel,
				NULL),
				"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
				return -EINVAL);
| 4386 | } |
| 4387 | |
| 4388 | if ((0 == data->mclk_dpm_key_disabled) && |
| 4389 | !data->mclk_ignore_signal && |
| 4390 | (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { |
| 4391 | |
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to Unfreeze MCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MCLKDPM_UnfreezeLevel,
				NULL),
				"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
				return -EINVAL);
| 4400 | } |
| 4401 | |
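	/* all pending SCLK/MCLK updates have been applied; keep only the VDDC OD flag */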
| 4402 | data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC; |
| 4403 | |
| 4404 | return 0; |
| 4405 | } |
| 4406 | |
| 4407 | static int smu7_notify_link_speed_change_after_state_change( |
| 4408 | struct pp_hwmgr *hwmgr, const void *input) |
| 4409 | { |
| 4410 | const struct phm_set_power_state_input *states = |
| 4411 | (const struct phm_set_power_state_input *)input; |
| 4412 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4413 | const struct smu7_power_state *smu7_ps = |
			cast_const_phw_smu7_power_state(states->pnew_state);
| 4415 | uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps); |
| 4416 | uint8_t request; |
| 4417 | |
| 4418 | if (data->pspp_notify_required) { |
| 4419 | if (target_link_speed == PP_PCIEGen3) |
| 4420 | request = PCIE_PERF_REQ_GEN3; |
| 4421 | else if (target_link_speed == PP_PCIEGen2) |
| 4422 | request = PCIE_PERF_REQ_GEN2; |
| 4423 | else |
| 4424 | request = PCIE_PERF_REQ_GEN1; |
| 4425 | |
| 4426 | if (request == PCIE_PERF_REQ_GEN1 && |
| 4427 | smu7_get_current_pcie_speed(hwmgr) > 0) |
| 4428 | return 0; |
| 4429 | |
| 4430 | #ifdef CONFIG_ACPI |
		if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
			if (PP_PCIEGen2 == target_link_speed)
				pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
			else
				pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
| 4436 | } |
| 4437 | #endif |
| 4438 | } |
| 4439 | |
| 4440 | return 0; |
| 4441 | } |
| 4442 | |
| 4443 | static int smu7_notify_no_display(struct pp_hwmgr *hwmgr) |
| 4444 | { |
	return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL) == 0) ? 0 : -EINVAL;
| 4446 | } |
| 4447 | |
| 4448 | static int smu7_notify_has_display(struct pp_hwmgr *hwmgr) |
| 4449 | { |
| 4450 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4451 | |
| 4452 | if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) { |
| 4453 | if (hwmgr->chip_id == CHIP_VEGAM) |
| 4454 | smum_send_msg_to_smc_with_parameter(hwmgr, |
					(PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
| 4456 | NULL); |
| 4457 | else |
| 4458 | smum_send_msg_to_smc_with_parameter(hwmgr, |
					(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
| 4460 | NULL); |
| 4461 | data->last_sent_vbi_timeout = data->frame_time_x2; |
| 4462 | } |
| 4463 | |
	return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ? 0 : -EINVAL;
| 4465 | } |
| 4466 | |
| 4467 | static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) |
| 4468 | { |
| 4469 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4470 | int result = 0; |
| 4471 | |
| 4472 | if (data->mclk_ignore_signal) |
| 4473 | result = smu7_notify_no_display(hwmgr); |
| 4474 | else |
| 4475 | result = smu7_notify_has_display(hwmgr); |
| 4476 | |
| 4477 | return result; |
| 4478 | } |
| 4479 | |
| 4480 | static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) |
| 4481 | { |
| 4482 | int tmp_result, result = 0; |
| 4483 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4484 | |
| 4485 | tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input); |
| 4486 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4487 | "Failed to find DPM states clocks in DPM table!" , |
| 4488 | result = tmp_result); |
| 4489 | |
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
| 4492 | tmp_result = |
| 4493 | smu7_request_link_speed_change_before_state_change(hwmgr, input); |
| 4494 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4495 | "Failed to request link speed change before state change!" , |
| 4496 | result = tmp_result); |
| 4497 | } |
| 4498 | |
| 4499 | tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr); |
| 4500 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4501 | "Failed to freeze SCLK MCLK DPM!" , result = tmp_result); |
| 4502 | |
| 4503 | tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); |
| 4504 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4505 | "Failed to populate and upload SCLK MCLK DPM levels!" , |
| 4506 | result = tmp_result); |
| 4507 | |
| 4508 | /* |
| 4509 | * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. |
| 4510 | * That effectively disables AVFS feature. |
| 4511 | */ |
| 4512 | if (hwmgr->hardcode_pp_table != NULL) |
| 4513 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; |
| 4514 | |
| 4515 | tmp_result = smu7_update_avfs(hwmgr); |
| 4516 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4517 | "Failed to update avfs voltages!" , |
| 4518 | result = tmp_result); |
| 4519 | |
| 4520 | tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input); |
| 4521 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4522 | "Failed to generate DPM level enabled mask!" , |
| 4523 | result = tmp_result); |
| 4524 | |
| 4525 | tmp_result = smum_update_sclk_threshold(hwmgr); |
| 4526 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4527 | "Failed to update SCLK threshold!" , |
| 4528 | result = tmp_result); |
| 4529 | |
| 4530 | tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr); |
| 4531 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4532 | "Failed to unfreeze SCLK MCLK DPM!" , |
| 4533 | result = tmp_result); |
| 4534 | |
| 4535 | tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr); |
| 4536 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4537 | "Failed to upload DPM level enabled mask!" , |
| 4538 | result = tmp_result); |
| 4539 | |
| 4540 | tmp_result = smu7_notify_smc_display(hwmgr); |
| 4541 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4542 | "Failed to notify smc display settings!" , |
| 4543 | result = tmp_result); |
| 4544 | |
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
| 4547 | tmp_result = |
| 4548 | smu7_notify_link_speed_change_after_state_change(hwmgr, input); |
| 4549 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4550 | "Failed to notify link speed change after state change!" , |
| 4551 | result = tmp_result); |
| 4552 | } |
| 4553 | data->apply_optimized_settings = false; |
| 4554 | return result; |
| 4555 | } |
| 4556 | |
| 4557 | static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) |
| 4558 | { |
| 4559 | hwmgr->thermal_controller. |
| 4560 | advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; |
| 4561 | |
| 4562 | return smum_send_msg_to_smc_with_parameter(hwmgr, |
			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
| 4564 | NULL); |
| 4565 | } |
| 4566 | |
| 4567 | static int |
| 4568 | smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) |
| 4569 | { |
| 4570 | return 0; |
| 4571 | } |
| 4572 | |
| 4573 | /** |
| 4574 | * smu7_program_display_gap - Programs the display gap |
| 4575 | * |
| 4576 | * @hwmgr: the address of the powerplay hardware manager. |
| 4577 | * Return: always OK |
| 4578 | */ |
| 4579 | static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) |
| 4580 | { |
| 4581 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4582 | uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); |
| 4583 | uint32_t display_gap2; |
| 4584 | uint32_t pre_vbi_time_in_us; |
| 4585 | uint32_t frame_time_in_us; |
| 4586 | uint32_t ref_clock, refresh_rate; |
| 4587 | |
| 4588 | display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); |
| 4589 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); |
| 4590 | |
| 4591 | ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); |
| 4592 | refresh_rate = hwmgr->display_config->vrefresh; |
| 4593 | |
| 4594 | if (0 == refresh_rate) |
| 4595 | refresh_rate = 60; |
| 4596 | |
| 4597 | frame_time_in_us = 1000000 / refresh_rate; |
| 4598 | |
| 4599 | pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time; |
| 4600 | |
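	/* frame_time_x2 is twice the frame time, expressed in 100 us units */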
| 4601 | data->frame_time_x2 = frame_time_in_us * 2 / 100; |
| 4602 | |
| 4603 | if (data->frame_time_x2 < 280) { |
| 4604 | pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n" , __func__, data->frame_time_x2); |
| 4605 | data->frame_time_x2 = 280; |
| 4606 | } |
| 4607 | |
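	/* convert the pre-VBI time from microseconds to reference clock (xclk) cycles; xclk is reported in 10 kHz units, so /100 gives MHz */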
| 4608 | display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); |
| 4609 | |
| 4610 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); |
| 4611 | |
| 4612 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 4613 | data->soft_regs_start + smum_get_offsetof(hwmgr, |
| 4614 | SMU_SoftRegisters, |
| 4615 | PreVBlankGap), 0x64); |
| 4616 | |
| 4617 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 4618 | data->soft_regs_start + smum_get_offsetof(hwmgr, |
| 4619 | SMU_SoftRegisters, |
| 4620 | VBlankTimeout), |
| 4621 | (frame_time_in_us - pre_vbi_time_in_us)); |
| 4622 | |
| 4623 | return 0; |
| 4624 | } |
| 4625 | |
| 4626 | static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr) |
| 4627 | { |
| 4628 | return smu7_program_display_gap(hwmgr); |
| 4629 | } |
| 4630 | |
| 4631 | /** |
| 4632 | * smu7_set_max_fan_rpm_output - Set maximum target operating fan output RPM |
| 4633 | * |
| 4634 | * @hwmgr: the address of the powerplay hardware manager. |
| 4635 | * @us_max_fan_rpm: max operating fan RPM value. |
| 4636 | * Return: The response that came from the SMC. |
| 4637 | */ |
| 4638 | static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm) |
| 4639 | { |
| 4640 | hwmgr->thermal_controller. |
| 4641 | advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; |
| 4642 | |
| 4643 | return smum_send_msg_to_smc_with_parameter(hwmgr, |
			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
| 4645 | NULL); |
| 4646 | } |
| 4647 | |
| 4648 | static const struct amdgpu_irq_src_funcs smu7_irq_funcs = { |
| 4649 | .process = phm_irq_process, |
| 4650 | }; |
| 4651 | |
| 4652 | static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr) |
| 4653 | { |
| 4654 | struct amdgpu_irq_src *source = |
| 4655 | kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL); |
| 4656 | |
| 4657 | if (!source) |
| 4658 | return -ENOMEM; |
| 4659 | |
| 4660 | source->funcs = &smu7_irq_funcs; |
| 4661 | |
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
| 4663 | AMDGPU_IRQ_CLIENTID_LEGACY, |
| 4664 | VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH, |
| 4665 | source); |
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
| 4667 | AMDGPU_IRQ_CLIENTID_LEGACY, |
| 4668 | VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW, |
| 4669 | source); |
| 4670 | |
| 4671 | /* Register CTF(GPIO_19) interrupt */ |
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
| 4673 | AMDGPU_IRQ_CLIENTID_LEGACY, |
| 4674 | VISLANDS30_IV_SRCID_GPIO_19, |
| 4675 | source); |
| 4676 | |
| 4677 | return 0; |
| 4678 | } |
| 4679 | |
| 4680 | static bool |
| 4681 | smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) |
| 4682 | { |
| 4683 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4684 | bool is_update_required = false; |
| 4685 | |
| 4686 | if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) |
| 4687 | is_update_required = true; |
| 4688 | |
| 4689 | if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh) |
| 4690 | is_update_required = true; |
| 4691 | |
| 4692 | if (hwmgr->chip_id >= CHIP_POLARIS10 && |
| 4693 | hwmgr->chip_id <= CHIP_VEGAM && |
| 4694 | data->last_sent_vbi_timeout != data->frame_time_x2) |
| 4695 | is_update_required = true; |
| 4696 | |
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
| 4698 | if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr && |
| 4699 | (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK || |
| 4700 | hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) |
| 4701 | is_update_required = true; |
| 4702 | } |
| 4703 | return is_update_required; |
| 4704 | } |
| 4705 | |
| 4706 | static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1, |
| 4707 | const struct smu7_performance_level *pl2) |
| 4708 | { |
| 4709 | return ((pl1->memory_clock == pl2->memory_clock) && |
| 4710 | (pl1->engine_clock == pl2->engine_clock) && |
| 4711 | (pl1->pcie_gen == pl2->pcie_gen) && |
| 4712 | (pl1->pcie_lane == pl2->pcie_lane)); |
| 4713 | } |
| 4714 | |
| 4715 | static int smu7_check_states_equal(struct pp_hwmgr *hwmgr, |
| 4716 | const struct pp_hw_power_state *pstate1, |
| 4717 | const struct pp_hw_power_state *pstate2, bool *equal) |
| 4718 | { |
| 4719 | const struct smu7_power_state *psa; |
| 4720 | const struct smu7_power_state *psb; |
| 4721 | int i; |
| 4722 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4723 | |
| 4724 | if (pstate1 == NULL || pstate2 == NULL || equal == NULL) |
| 4725 | return -EINVAL; |
| 4726 | |
	psa = cast_const_phw_smu7_power_state(pstate1);
	psb = cast_const_phw_smu7_power_state(pstate2);
| 4729 | /* If the two states don't even have the same number of performance levels they cannot be the same state. */ |
| 4730 | if (psa->performance_level_count != psb->performance_level_count) { |
| 4731 | *equal = false; |
| 4732 | return 0; |
| 4733 | } |
| 4734 | |
| 4735 | for (i = 0; i < psa->performance_level_count; i++) { |
		if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
| 4737 | /* If we have found even one performance level pair that is different the states are different. */ |
| 4738 | *equal = false; |
| 4739 | return 0; |
| 4740 | } |
| 4741 | } |
| 4742 | |
| 4743 | /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ |
| 4744 | *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); |
| 4745 | *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); |
| 4746 | *equal &= (psa->sclk_threshold == psb->sclk_threshold); |
| 4747 | /* For OD call, set value based on flag */ |
| 4748 | *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | |
| 4749 | DPMTABLE_OD_UPDATE_MCLK | |
| 4750 | DPMTABLE_OD_UPDATE_VDDC)); |
| 4751 | |
| 4752 | return 0; |
| 4753 | } |
| 4754 | |
| 4755 | static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) |
| 4756 | { |
| 4757 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4758 | |
| 4759 | uint32_t tmp; |
| 4760 | |
| 4761 | /* Read MC indirect register offset 0x9F bits [3:0] to see |
| 4762 | * if VBIOS has already loaded a full version of MC ucode |
| 4763 | * or not. |
| 4764 | */ |
| 4765 | |
| 4766 | smu7_get_mc_microcode_version(hwmgr); |
| 4767 | |
| 4768 | data->need_long_memory_training = false; |
| 4769 | |
| 4770 | cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, |
| 4771 | ixMC_IO_DEBUG_UP_13); |
| 4772 | tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); |
| 4773 | |
| 4774 | if (tmp & (1 << 23)) { |
| 4775 | data->mem_latency_high = MEM_LATENCY_HIGH; |
| 4776 | data->mem_latency_low = MEM_LATENCY_LOW; |
| 4777 | if ((hwmgr->chip_id == CHIP_POLARIS10) || |
| 4778 | (hwmgr->chip_id == CHIP_POLARIS11) || |
| 4779 | (hwmgr->chip_id == CHIP_POLARIS12)) |
| 4780 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL); |
| 4781 | } else { |
| 4782 | data->mem_latency_high = 330; |
| 4783 | data->mem_latency_low = 330; |
| 4784 | if ((hwmgr->chip_id == CHIP_POLARIS10) || |
| 4785 | (hwmgr->chip_id == CHIP_POLARIS11) || |
| 4786 | (hwmgr->chip_id == CHIP_POLARIS12)) |
| 4787 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL); |
| 4788 | } |
| 4789 | |
| 4790 | return 0; |
| 4791 | } |
| 4792 | |
| 4793 | static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr) |
| 4794 | { |
| 4795 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4796 | |
| 4797 | data->clock_registers.vCG_SPLL_FUNC_CNTL = |
| 4798 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); |
| 4799 | data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = |
| 4800 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); |
| 4801 | data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = |
| 4802 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); |
| 4803 | data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = |
| 4804 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); |
| 4805 | data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = |
| 4806 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); |
| 4807 | data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = |
| 4808 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); |
| 4809 | data->clock_registers.vDLL_CNTL = |
| 4810 | cgs_read_register(hwmgr->device, mmDLL_CNTL); |
| 4811 | data->clock_registers.vMCLK_PWRMGT_CNTL = |
| 4812 | cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); |
| 4813 | data->clock_registers.vMPLL_AD_FUNC_CNTL = |
| 4814 | cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); |
| 4815 | data->clock_registers.vMPLL_DQ_FUNC_CNTL = |
| 4816 | cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); |
| 4817 | data->clock_registers.vMPLL_FUNC_CNTL = |
| 4818 | cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); |
| 4819 | data->clock_registers.vMPLL_FUNC_CNTL_1 = |
| 4820 | cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); |
| 4821 | data->clock_registers.vMPLL_FUNC_CNTL_2 = |
| 4822 | cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); |
| 4823 | data->clock_registers.vMPLL_SS1 = |
| 4824 | cgs_read_register(hwmgr->device, mmMPLL_SS1); |
| 4825 | data->clock_registers.vMPLL_SS2 = |
| 4826 | cgs_read_register(hwmgr->device, mmMPLL_SS2); |
| 4827 | return 0; |
| 4828 | |
| 4829 | } |
| 4830 | |
| 4831 | /** |
| 4832 | * smu7_get_memory_type - Find out if memory is GDDR5. |
| 4833 | * |
| 4834 | * @hwmgr: the address of the powerplay hardware manager. |
| 4835 | * Return: always 0 |
| 4836 | */ |
| 4837 | static int smu7_get_memory_type(struct pp_hwmgr *hwmgr) |
| 4838 | { |
| 4839 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4840 | struct amdgpu_device *adev = hwmgr->adev; |
| 4841 | |
| 4842 | data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5); |
| 4843 | |
| 4844 | return 0; |
| 4845 | } |
| 4846 | |
| 4847 | /** |
| 4848 | * smu7_enable_acpi_power_management - Enables Dynamic Power Management by SMC |
| 4849 | * |
| 4850 | * @hwmgr: the address of the powerplay hardware manager. |
| 4851 | * Return: always 0 |
| 4852 | */ |
| 4853 | static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr) |
| 4854 | { |
| 4855 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 4856 | GENERAL_PWRMGT, STATIC_PM_EN, 1); |
| 4857 | |
| 4858 | return 0; |
| 4859 | } |
| 4860 | |
| 4861 | /** |
| 4862 | * smu7_init_power_gate_state - Initialize PowerGating States for different engines |
| 4863 | * |
| 4864 | * @hwmgr: the address of the powerplay hardware manager. |
| 4865 | * Return: always 0 |
| 4866 | */ |
| 4867 | static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr) |
| 4868 | { |
| 4869 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4870 | |
| 4871 | data->uvd_power_gated = false; |
| 4872 | data->vce_power_gated = false; |
| 4873 | |
| 4874 | return 0; |
| 4875 | } |
| 4876 | |
| 4877 | static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr) |
| 4878 | { |
| 4879 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4880 | |
| 4881 | data->low_sclk_interrupt_threshold = 0; |
| 4882 | return 0; |
| 4883 | } |
| 4884 | |
| 4885 | static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) |
| 4886 | { |
| 4887 | int tmp_result, result = 0; |
| 4888 | |
| 4889 | smu7_check_mc_firmware(hwmgr); |
| 4890 | |
| 4891 | tmp_result = smu7_read_clock_registers(hwmgr); |
| 4892 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4893 | "Failed to read clock registers!" , result = tmp_result); |
| 4894 | |
| 4895 | tmp_result = smu7_get_memory_type(hwmgr); |
| 4896 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4897 | "Failed to get memory type!" , result = tmp_result); |
| 4898 | |
| 4899 | tmp_result = smu7_enable_acpi_power_management(hwmgr); |
| 4900 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4901 | "Failed to enable ACPI power management!" , result = tmp_result); |
| 4902 | |
| 4903 | tmp_result = smu7_init_power_gate_state(hwmgr); |
| 4904 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4905 | "Failed to init power gate state!" , result = tmp_result); |
| 4906 | |
| 4907 | tmp_result = smu7_get_mc_microcode_version(hwmgr); |
| 4908 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4909 | "Failed to get MC microcode version!" , result = tmp_result); |
| 4910 | |
| 4911 | tmp_result = smu7_init_sclk_threshold(hwmgr); |
| 4912 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4913 | "Failed to init sclk threshold!" , result = tmp_result); |
| 4914 | |
| 4915 | return result; |
| 4916 | } |
| 4917 | |
| 4918 | static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, |
| 4919 | enum pp_clock_type type, uint32_t mask) |
| 4920 | { |
| 4921 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4922 | |
| 4923 | if (mask == 0) |
| 4924 | return -EINVAL; |
| 4925 | |
| 4926 | switch (type) { |
| 4927 | case PP_SCLK: |
| 4928 | if (!data->sclk_dpm_key_disabled) |
| 4929 | smum_send_msg_to_smc_with_parameter(hwmgr, |
| 4930 | PPSMC_MSG_SCLKDPM_SetEnabledMask, |
					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
| 4932 | NULL); |
| 4933 | break; |
| 4934 | case PP_MCLK: |
| 4935 | if (!data->mclk_dpm_key_disabled) |
| 4936 | smum_send_msg_to_smc_with_parameter(hwmgr, |
| 4937 | PPSMC_MSG_MCLKDPM_SetEnabledMask, |
					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
| 4939 | NULL); |
| 4940 | break; |
| 4941 | case PP_PCIE: |
| 4942 | { |
| 4943 | uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; |
| 4944 | |
| 4945 | if (!data->pcie_dpm_key_disabled) { |
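			/* more than one PCIe level in the mask: let DPM pick among them; a single bit forces that exact level */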
			if (fls(tmp) != ffs(tmp))
| 4947 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel, |
| 4948 | NULL); |
| 4949 | else |
| 4950 | smum_send_msg_to_smc_with_parameter(hwmgr, |
| 4951 | PPSMC_MSG_PCIeDPM_ForceLevel, |
						fls(tmp) - 1,
| 4953 | NULL); |
| 4954 | } |
| 4955 | break; |
| 4956 | } |
| 4957 | default: |
| 4958 | break; |
| 4959 | } |
| 4960 | |
| 4961 | return 0; |
| 4962 | } |
| 4963 | |
| 4964 | static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, |
| 4965 | enum pp_clock_type type, char *buf) |
| 4966 | { |
| 4967 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4968 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); |
| 4969 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); |
| 4970 | struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); |
| 4971 | struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); |
| 4972 | struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels); |
| 4973 | struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels); |
| 4974 | int size = 0, ret = 0; |
| 4975 | uint32_t i, now, clock, pcie_speed; |
| 4976 | |
| 4977 | switch (type) { |
| 4978 | case PP_SCLK: |
		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
| 4980 | if (ret) |
| 4981 | return ret; |
| 4982 | for (i = 0; i < sclk_table->count; i++) { |
| 4983 | if (clock > sclk_table->dpm_levels[i].value) |
| 4984 | continue; |
| 4985 | break; |
| 4986 | } |
| 4987 | now = i; |
| 4988 | |
| 4989 | for (i = 0; i < sclk_table->count; i++) |
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
| 4993 | break; |
| 4994 | case PP_MCLK: |
		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
| 4996 | if (ret) |
| 4997 | return ret; |
| 4998 | for (i = 0; i < mclk_table->count; i++) { |
| 4999 | if (clock > mclk_table->dpm_levels[i].value) |
| 5000 | continue; |
| 5001 | break; |
| 5002 | } |
| 5003 | now = i; |
| 5004 | |
| 5005 | for (i = 0; i < mclk_table->count; i++) |
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
| 5009 | break; |
| 5010 | case PP_PCIE: |
| 5011 | pcie_speed = smu7_get_current_pcie_speed(hwmgr); |
| 5012 | for (i = 0; i < pcie_table->count; i++) { |
| 5013 | if (pcie_speed != pcie_table->dpm_levels[i].value) |
| 5014 | continue; |
| 5015 | break; |
| 5016 | } |
| 5017 | now = i; |
| 5018 | |
| 5019 | for (i = 0; i < pcie_table->count; i++) |
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
					(i == now) ? "*" : "");
| 5025 | break; |
| 5026 | case OD_SCLK: |
| 5027 | if (hwmgr->od_enabled) { |
			size += sprintf(buf + size, "%s:\n", "OD_SCLK");
			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
						i, odn_sclk_table->entries[i].clock/100,
						odn_sclk_table->entries[i].vddc);
| 5033 | } |
| 5034 | break; |
| 5035 | case OD_MCLK: |
| 5036 | if (hwmgr->od_enabled) { |
			size += sprintf(buf + size, "%s:\n", "OD_MCLK");
			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
						i, odn_mclk_table->entries[i].clock/100,
						odn_mclk_table->entries[i].vddc);
| 5042 | } |
| 5043 | break; |
| 5044 | case OD_RANGE: |
| 5045 | if (hwmgr->od_enabled) { |
			size += sprintf(buf + size, "%s:\n", "OD_RANGE");
			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
					data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
					hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
					data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
					hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
					data->odn_dpm_table.min_vddc,
					data->odn_dpm_table.max_vddc);
| 5056 | } |
| 5057 | break; |
| 5058 | default: |
| 5059 | break; |
| 5060 | } |
| 5061 | return size; |
| 5062 | } |
| 5063 | |
| 5064 | static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) |
| 5065 | { |
| 5066 | switch (mode) { |
| 5067 | case AMD_FAN_CTRL_NONE: |
		smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
| 5069 | break; |
| 5070 | case AMD_FAN_CTRL_MANUAL: |
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl))
| 5073 | smu7_fan_ctrl_stop_smc_fan_control(hwmgr); |
| 5074 | break; |
| 5075 | case AMD_FAN_CTRL_AUTO: |
| 5076 | if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode)) |
| 5077 | smu7_fan_ctrl_start_smc_fan_control(hwmgr); |
| 5078 | break; |
| 5079 | default: |
| 5080 | break; |
| 5081 | } |
| 5082 | } |
| 5083 | |
| 5084 | static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr) |
| 5085 | { |
| 5086 | return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL; |
| 5087 | } |
| 5088 | |
| 5089 | static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr) |
| 5090 | { |
| 5091 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5092 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); |
| 5093 | struct smu7_single_dpm_table *golden_sclk_table = |
| 5094 | &(data->golden_dpm_table.sclk_table); |
| 5095 | int value = sclk_table->dpm_levels[sclk_table->count - 1].value; |
| 5096 | int golden_value = golden_sclk_table->dpm_levels |
| 5097 | [golden_sclk_table->count - 1].value; |
| 5098 | |
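	/* OD value is the percentage increase of the top SCLK DPM level over the golden (default) top level */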
| 5099 | value -= golden_value; |
| 5100 | value = DIV_ROUND_UP(value * 100, golden_value); |
| 5101 | |
| 5102 | return value; |
| 5103 | } |
| 5104 | |
| 5105 | static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) |
| 5106 | { |
| 5107 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5108 | struct smu7_single_dpm_table *golden_sclk_table = |
| 5109 | &(data->golden_dpm_table.sclk_table); |
| 5110 | struct pp_power_state *ps; |
| 5111 | struct smu7_power_state *smu7_ps; |
| 5112 | |
| 5113 | if (value > 20) |
| 5114 | value = 20; |
| 5115 | |
| 5116 | ps = hwmgr->request_ps; |
| 5117 | |
| 5118 | if (ps == NULL) |
| 5119 | return -EINVAL; |
| 5120 | |
	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
| 5122 | |
| 5123 | smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock = |
| 5124 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * |
| 5125 | value / 100 + |
| 5126 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; |
| 5127 | |
| 5128 | return 0; |
| 5129 | } |
| 5130 | |
| 5131 | static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr) |
| 5132 | { |
| 5133 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5134 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); |
| 5135 | struct smu7_single_dpm_table *golden_mclk_table = |
| 5136 | &(data->golden_dpm_table.mclk_table); |
| 5137 | int value = mclk_table->dpm_levels[mclk_table->count - 1].value; |
| 5138 | int golden_value = golden_mclk_table->dpm_levels |
| 5139 | [golden_mclk_table->count - 1].value; |
| 5140 | |
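	/* OD value is the percentage increase of the top MCLK DPM level over the golden (default) top level */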
| 5141 | value -= golden_value; |
| 5142 | value = DIV_ROUND_UP(value * 100, golden_value); |
| 5143 | |
| 5144 | return value; |
| 5145 | } |
| 5146 | |
| 5147 | static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) |
| 5148 | { |
| 5149 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5150 | struct smu7_single_dpm_table *golden_mclk_table = |
| 5151 | &(data->golden_dpm_table.mclk_table); |
| 5152 | struct pp_power_state *ps; |
| 5153 | struct smu7_power_state *smu7_ps; |
| 5154 | |
| 5155 | if (value > 20) |
| 5156 | value = 20; |
| 5157 | |
| 5158 | ps = hwmgr->request_ps; |
| 5159 | |
| 5160 | if (ps == NULL) |
| 5161 | return -EINVAL; |
| 5162 | |
	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
| 5164 | |
| 5165 | smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock = |
| 5166 | golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * |
| 5167 | value / 100 + |
| 5168 | golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; |
| 5169 | |
| 5170 | return 0; |
| 5171 | } |
| 5172 | |
| 5173 | |
| 5174 | static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) |
| 5175 | { |
| 5176 | struct phm_ppt_v1_information *table_info = |
| 5177 | (struct phm_ppt_v1_information *)hwmgr->pptable; |
| 5178 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL; |
| 5179 | struct phm_clock_voltage_dependency_table *sclk_table; |
| 5180 | int i; |
| 5181 | |
| 5182 | if (hwmgr->pp_table_version == PP_TABLE_V1) { |
| 5183 | if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL) |
| 5184 | return -EINVAL; |
| 5185 | dep_sclk_table = table_info->vdd_dep_on_sclk; |
| 5186 | for (i = 0; i < dep_sclk_table->count; i++) |
| 5187 | clocks->clock[i] = dep_sclk_table->entries[i].clk * 10; |
| 5188 | clocks->count = dep_sclk_table->count; |
| 5189 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { |
| 5190 | sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk; |
| 5191 | for (i = 0; i < sclk_table->count; i++) |
| 5192 | clocks->clock[i] = sclk_table->entries[i].clk * 10; |
| 5193 | clocks->count = sclk_table->count; |
| 5194 | } |
| 5195 | |
| 5196 | return 0; |
| 5197 | } |
| 5198 | |
| 5199 | static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk) |
| 5200 | { |
| 5201 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5202 | |
| 5203 | if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY) |
| 5204 | return data->mem_latency_high; |
| 5205 | else if (clk >= MEM_FREQ_HIGH_LATENCY) |
| 5206 | return data->mem_latency_low; |
| 5207 | else |
| 5208 | return MEM_LATENCY_ERR; |
| 5209 | } |
| 5210 | |
| 5211 | static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) |
| 5212 | { |
| 5213 | struct phm_ppt_v1_information *table_info = |
| 5214 | (struct phm_ppt_v1_information *)hwmgr->pptable; |
| 5215 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; |
| 5216 | int i; |
| 5217 | struct phm_clock_voltage_dependency_table *mclk_table; |
| 5218 | |
| 5219 | if (hwmgr->pp_table_version == PP_TABLE_V1) { |
| 5220 | if (table_info == NULL) |
| 5221 | return -EINVAL; |
| 5222 | dep_mclk_table = table_info->vdd_dep_on_mclk; |
| 5223 | for (i = 0; i < dep_mclk_table->count; i++) { |
| 5224 | clocks->clock[i] = dep_mclk_table->entries[i].clk * 10; |
| 5225 | clocks->latency[i] = smu7_get_mem_latency(hwmgr, |
						dep_mclk_table->entries[i].clk);
| 5227 | } |
| 5228 | clocks->count = dep_mclk_table->count; |
| 5229 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { |
| 5230 | mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk; |
| 5231 | for (i = 0; i < mclk_table->count; i++) |
| 5232 | clocks->clock[i] = mclk_table->entries[i].clk * 10; |
| 5233 | clocks->count = mclk_table->count; |
| 5234 | } |
| 5235 | return 0; |
| 5236 | } |
| 5237 | |
| 5238 | static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, |
| 5239 | struct amd_pp_clocks *clocks) |
| 5240 | { |
| 5241 | switch (type) { |
| 5242 | case amd_pp_sys_clock: |
| 5243 | smu7_get_sclks(hwmgr, clocks); |
| 5244 | break; |
| 5245 | case amd_pp_mem_clock: |
| 5246 | smu7_get_mclks(hwmgr, clocks); |
| 5247 | break; |
| 5248 | default: |
| 5249 | return -EINVAL; |
| 5250 | } |
| 5251 | |
| 5252 | return 0; |
| 5253 | } |
| 5254 | |
| 5255 | static int smu7_get_sclks_with_latency(struct pp_hwmgr *hwmgr, |
| 5256 | struct pp_clock_levels_with_latency *clocks) |
| 5257 | { |
| 5258 | struct phm_ppt_v1_information *table_info = |
| 5259 | (struct phm_ppt_v1_information *)hwmgr->pptable; |
| 5260 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = |
| 5261 | table_info->vdd_dep_on_sclk; |
| 5262 | int i; |
| 5263 | |
| 5264 | clocks->num_levels = 0; |
| 5265 | for (i = 0; i < dep_sclk_table->count; i++) { |
| 5266 | if (dep_sclk_table->entries[i].clk) { |
| 5267 | clocks->data[clocks->num_levels].clocks_in_khz = |
| 5268 | dep_sclk_table->entries[i].clk * 10; |
| 5269 | clocks->num_levels++; |
| 5270 | } |
| 5271 | } |
| 5272 | |
| 5273 | return 0; |
| 5274 | } |
| 5275 | |
| 5276 | static int smu7_get_mclks_with_latency(struct pp_hwmgr *hwmgr, |
| 5277 | struct pp_clock_levels_with_latency *clocks) |
| 5278 | { |
| 5279 | struct phm_ppt_v1_information *table_info = |
| 5280 | (struct phm_ppt_v1_information *)hwmgr->pptable; |
| 5281 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = |
| 5282 | table_info->vdd_dep_on_mclk; |
| 5283 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5284 | int i; |
| 5285 | |
| 5286 | clocks->num_levels = 0; |
| 5287 | data->mclk_latency_table.count = 0; |
| 5288 | for (i = 0; i < dep_mclk_table->count; i++) { |
| 5289 | if (dep_mclk_table->entries[i].clk) { |
| 5290 | clocks->data[clocks->num_levels].clocks_in_khz = |
| 5291 | dep_mclk_table->entries[i].clk * 10; |
| 5292 | data->mclk_latency_table.entries[data->mclk_latency_table.count].frequency = |
| 5293 | dep_mclk_table->entries[i].clk; |
| 5294 | clocks->data[clocks->num_levels].latency_in_us = |
| 5295 | data->mclk_latency_table.entries[data->mclk_latency_table.count].latency = |
				smu7_get_mem_latency(hwmgr, dep_mclk_table->entries[i].clk);
| 5297 | clocks->num_levels++; |
| 5298 | data->mclk_latency_table.count++; |
| 5299 | } |
| 5300 | } |
| 5301 | |
| 5302 | return 0; |
| 5303 | } |
| 5304 | |
| 5305 | static int smu7_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, |
| 5306 | enum amd_pp_clock_type type, |
| 5307 | struct pp_clock_levels_with_latency *clocks) |
| 5308 | { |
| 5309 | if (!(hwmgr->chip_id >= CHIP_POLARIS10 && |
| 5310 | hwmgr->chip_id <= CHIP_VEGAM)) |
| 5311 | return -EINVAL; |
| 5312 | |
| 5313 | switch (type) { |
| 5314 | case amd_pp_sys_clock: |
| 5315 | smu7_get_sclks_with_latency(hwmgr, clocks); |
| 5316 | break; |
| 5317 | case amd_pp_mem_clock: |
| 5318 | smu7_get_mclks_with_latency(hwmgr, clocks); |
| 5319 | break; |
| 5320 | default: |
| 5321 | return -EINVAL; |
| 5322 | } |
| 5323 | |
| 5324 | return 0; |
| 5325 | } |
| 5326 | |
| 5327 | static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, |
| 5328 | void *clock_range) |
| 5329 | { |
| 5330 | struct phm_ppt_v1_information *table_info = |
| 5331 | (struct phm_ppt_v1_information *)hwmgr->pptable; |
| 5332 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = |
| 5333 | table_info->vdd_dep_on_mclk; |
| 5334 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = |
| 5335 | table_info->vdd_dep_on_sclk; |
| 5336 | struct polaris10_smumgr *smu_data = |
| 5337 | (struct polaris10_smumgr *)(hwmgr->smu_backend); |
| 5338 | SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); |
| 5339 | struct dm_pp_wm_sets_with_clock_ranges *watermarks = |
| 5340 | (struct dm_pp_wm_sets_with_clock_ranges *)clock_range; |
| 5341 | uint32_t i, j, k; |
| 5342 | bool valid_entry; |
| 5343 | |
| 5344 | if (!(hwmgr->chip_id >= CHIP_POLARIS10 && |
| 5345 | hwmgr->chip_id <= CHIP_VEGAM)) |
| 5346 | return -EINVAL; |
| 5347 | |
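	/*
	 * For each (mclk, sclk) pair pick the DAL watermark set whose clock
	 * range contains it; the dependency-table clocks are in 10 kHz units
	 * while the DAL ranges are in kHz, hence the /10.
	 */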
| 5348 | for (i = 0; i < dep_mclk_table->count; i++) { |
| 5349 | for (j = 0; j < dep_sclk_table->count; j++) { |
| 5350 | valid_entry = false; |
| 5351 | for (k = 0; k < watermarks->num_wm_sets; k++) { |
| 5352 | if (dep_sclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz / 10 && |
| 5353 | dep_sclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz / 10 && |
| 5354 | dep_mclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz / 10 && |
| 5355 | dep_mclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz / 10) { |
| 5356 | valid_entry = true; |
| 5357 | table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id; |
| 5358 | break; |
| 5359 | } |
| 5360 | } |
| 5361 | PP_ASSERT_WITH_CODE(valid_entry, |
| 5362 | "Clock is not in range of specified clock range for watermark from DAL! Using highest water mark set." , |
| 5363 | table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k - 1].wm_set_id); |
| 5364 | } |
| 5365 | } |
| 5366 | |
| 5367 | return smu7_copy_bytes_to_smc(hwmgr, |
| 5368 | smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, DisplayWatermark),
| 5369 | (uint8_t *)table->DisplayWatermark,
| 5370 | sizeof(uint8_t) * SMU74_MAX_LEVELS_MEMORY * SMU74_MAX_LEVELS_GRAPHICS,
| 5371 | SMC_RAM_END); |
| 5372 | } |
| 5373 | |
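|  | /*
|  |  * Pass the DRAM log (CAC) buffer location to the SMU by writing its MC
|  |  * and virtual addresses plus its size into the SMU soft-register area.
|  |  */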
| 5374 | static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, |
| 5375 | uint32_t virtual_addr_low, |
| 5376 | uint32_t virtual_addr_hi, |
| 5377 | uint32_t mc_addr_low, |
| 5378 | uint32_t mc_addr_hi, |
| 5379 | uint32_t size) |
| 5380 | { |
| 5381 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5382 | |
| 5383 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 5384 | data->soft_regs_start + |
| 5385 | smum_get_offsetof(hwmgr, |
| 5386 | SMU_SoftRegisters, DRAM_LOG_ADDR_H), |
| 5387 | mc_addr_hi); |
| 5388 | |
| 5389 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 5390 | data->soft_regs_start + |
| 5391 | smum_get_offsetof(hwmgr, |
| 5392 | SMU_SoftRegisters, DRAM_LOG_ADDR_L), |
| 5393 | mc_addr_low); |
| 5394 | |
| 5395 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 5396 | data->soft_regs_start + |
| 5397 | smum_get_offsetof(hwmgr, |
| 5398 | SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H), |
| 5399 | virtual_addr_hi); |
| 5400 | |
| 5401 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 5402 | data->soft_regs_start + |
| 5403 | smum_get_offsetof(hwmgr, |
| 5404 | SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L), |
| 5405 | virtual_addr_low); |
| 5406 | |
| 5407 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 5408 | data->soft_regs_start + |
| 5409 | smum_get_offsetof(hwmgr, |
| 5410 | SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE), |
| 5411 | size); |
| 5412 | return 0; |
| 5413 | } |
| 5414 | |
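|  | /* Report the highest engine and memory clocks from the DPM tables. */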
| 5415 | static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr, |
| 5416 | struct amd_pp_simple_clock_info *clocks) |
| 5417 | { |
| 5418 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5419 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); |
| 5420 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); |
| 5421 | |
| 5422 | if (clocks == NULL) |
| 5423 | return -EINVAL; |
| 5424 | |
| 5425 | clocks->memory_max_clock = mclk_table->count > 1 ? |
| 5426 | mclk_table->dpm_levels[mclk_table->count-1].value : |
| 5427 | mclk_table->dpm_levels[0].value; |
| 5428 | clocks->engine_max_clock = sclk_table->count > 1 ? |
| 5429 | sclk_table->dpm_levels[sclk_table->count-1].value : |
| 5430 | sclk_table->dpm_levels[0].value; |
| 5431 | return 0; |
| 5432 | } |
| 5433 | |
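|  | /*
|  |  * Start from the default SMU7 thermal policy and override the maximum
|  |  * with the software shutdown temperature from the powerplay table; the
|  |  * software CTF threshold tracks that maximum.
|  |  */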
| 5434 | static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, |
| 5435 | struct PP_TemperatureRange *thermal_data) |
| 5436 | { |
| 5437 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5438 | struct phm_ppt_v1_information *table_info = |
| 5439 | (struct phm_ppt_v1_information *)hwmgr->pptable; |
| 5440 | |
| 5441 | memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange)); |
| 5442 | |
| 5443 | if (hwmgr->pp_table_version == PP_TABLE_V1) |
| 5444 | thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp * |
| 5445 | PP_TEMPERATURE_UNITS_PER_CENTIGRADES; |
| 5446 | else if (hwmgr->pp_table_version == PP_TABLE_V0) |
| 5447 | thermal_data->max = data->thermal_temp_setting.temperature_shutdown; |
| 5448 | |
| 5449 | thermal_data->sw_ctf_threshold = thermal_data->max; |
| 5450 | |
| 5451 | return 0; |
| 5452 | } |
| 5453 | |
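|  | /*
|  |  * Validate an overdrive (clock, voltage) pair: the voltage must stay
|  |  * inside the ODN vddc window and the clock between the lowest golden
|  |  * DPM level and the board's overdrive limit.
|  |  */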
| 5454 | static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr, |
| 5455 | enum PP_OD_DPM_TABLE_COMMAND type, |
| 5456 | uint32_t clk, |
| 5457 | uint32_t voltage) |
| 5458 | { |
| 5459 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5460 | |
| 5461 | if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) { |
| 5462 | pr_info("OD voltage is out of range [%d - %d] mV\n" , |
| 5463 | data->odn_dpm_table.min_vddc, |
| 5464 | data->odn_dpm_table.max_vddc); |
| 5465 | return false; |
| 5466 | } |
| 5467 | |
| 5468 | if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) { |
| 5469 | if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk || |
| 5470 | hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) { |
| 5471 | pr_info("OD engine clock is out of range [%d - %d] MHz\n" , |
| 5472 | data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, |
| 5473 | hwmgr->platform_descriptor.overdriveLimit.engineClock/100); |
| 5474 | return false; |
| 5475 | } |
| 5476 | } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) { |
| 5477 | if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk || |
| 5478 | hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) { |
| 5479 | pr_info("OD memory clock is out of range [%d - %d] MHz\n" , |
| 5480 | data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, |
| 5481 | hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); |
| 5482 | return false; |
| 5483 | } |
| 5484 | } else { |
| 5485 | return false; |
| 5486 | } |
| 5487 | |
| 5488 | return true; |
| 5489 | } |
| 5490 | |
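|  | /*
|  |  * Handle overdrive (OD) table edit requests: select the ODN sclk or
|  |  * mclk table to edit, restore the defaults, or commit a previously
|  |  * edited table.
|  |  */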
| 5491 | static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, |
| 5492 | enum PP_OD_DPM_TABLE_COMMAND type, |
| 5493 | long *input, uint32_t size) |
| 5494 | { |
| 5495 | uint32_t i; |
| 5496 | struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL; |
| 5497 | struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL; |
| 5498 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5499 | |
| 5500 | uint32_t input_clk; |
| 5501 | uint32_t input_vol; |
| 5502 | uint32_t input_level; |
| 5503 | |
| 5504 | PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
| 5505 | return -EINVAL); |
| 5506 | |
| 5507 | if (!hwmgr->od_enabled) { |
| 5508 | pr_info("OverDrive feature not enabled\n" ); |
| 5509 | return -EINVAL; |
| 5510 | } |
| 5511 | |
| 5512 | if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) { |
| 5513 | podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels; |
| 5514 | podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk; |
| 5515 | PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend), |
| 5516 | "Failed to get ODN SCLK and Voltage tables" , |
| 5517 | return -EINVAL); |
| 5518 | } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) { |
| 5519 | podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels; |
| 5520 | podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk; |
| 5521 | |
| 5522 | PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend), |
| 5523 | "Failed to get ODN MCLK and Voltage tables" , |
| 5524 | return -EINVAL); |
| 5525 | } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) { |
| 5526 | smu7_odn_initial_default_setting(hwmgr); |
| 5527 | return 0; |
| 5528 | } else if (PP_OD_COMMIT_DPM_TABLE == type) { |
| 5529 | smu7_check_dpm_table_updated(hwmgr); |
| 5530 | return 0; |
| 5531 | } else { |
| 5532 | return -EINVAL; |
| 5533 | } |
| 5534 | |
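|  | /*
|  |  * Each edit is a (level, clock, voltage) triplet; the clock is given in
|  |  * MHz and converted to the driver's internal 10 kHz units before being
|  |  * validated and written into the ODN tables.
|  |  */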
| 5535 | for (i = 0; i < size; i += 3) { |
| 5536 | if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) { |
| 5537 | pr_info("invalid clock voltage input \n" ); |
| 5538 | return 0; |
| 5539 | } |
| 5540 | input_level = input[i]; |
| 5541 | input_clk = input[i+1] * 100; |
| 5542 | input_vol = input[i+2]; |
| 5543 | |
| 5544 | if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
| 5545 | podn_dpm_table_in_backend->entries[input_level].clock = input_clk; |
| 5546 | podn_vdd_dep_in_backend->entries[input_level].clk = input_clk; |
| 5547 | podn_dpm_table_in_backend->entries[input_level].vddc = input_vol; |
| 5548 | podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol; |
| 5549 | podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol; |
| 5550 | } else { |
| 5551 | return -EINVAL; |
| 5552 | } |
| 5553 | } |
| 5554 | |
| 5555 | return 0; |
| 5556 | } |
| 5557 | |
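|  | /*
|  |  * Print the power-profile table for sysfs: one row per profile, with the
|  |  * active profile showing its live settings and marked with '*'.
|  |  */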
| 5558 | static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) |
| 5559 | { |
| 5560 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5561 | uint32_t i, size = 0; |
| 5562 | uint32_t len; |
| 5563 | |
| 5564 | static const char *title[8] = {"NUM",
| 5565 | "MODE_NAME",
| 5566 | "SCLK_UP_HYST",
| 5567 | "SCLK_DOWN_HYST",
| 5568 | "SCLK_ACTIVE_LEVEL",
| 5569 | "MCLK_UP_HYST",
| 5570 | "MCLK_DOWN_HYST",
| 5571 | "MCLK_ACTIVE_LEVEL"};
| 5572 | |
| 5573 | if (!buf) |
| 5574 | return -EINVAL; |
| 5575 | |
| 5576 | phm_get_sysfs_buf(&buf, &size);
| 5577 | |
| 5578 | size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
| 5579 | title[0], title[1], title[2], title[3], |
| 5580 | title[4], title[5], title[6], title[7]); |
| 5581 | |
| 5582 | len = ARRAY_SIZE(smu7_profiling); |
| 5583 | |
| 5584 | for (i = 0; i < len; i++) { |
| 5585 | if (i == hwmgr->power_profile_mode) { |
| 5586 | size += sysfs_emit_at(buf, size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
| 5587 | i, amdgpu_pp_profile_name[i], "*",
| 5588 | data->current_profile_setting.sclk_up_hyst, |
| 5589 | data->current_profile_setting.sclk_down_hyst, |
| 5590 | data->current_profile_setting.sclk_activity, |
| 5591 | data->current_profile_setting.mclk_up_hyst, |
| 5592 | data->current_profile_setting.mclk_down_hyst, |
| 5593 | data->current_profile_setting.mclk_activity); |
| 5594 | continue; |
| 5595 | } |
| 5596 | if (smu7_profiling[i].bupdate_sclk) |
| 5597 | size += sysfs_emit_at(buf, size, "%3d %16s: %8d %16d %16d ",
| 5598 | i, amdgpu_pp_profile_name[i], smu7_profiling[i].sclk_up_hyst, |
| 5599 | smu7_profiling[i].sclk_down_hyst, |
| 5600 | smu7_profiling[i].sclk_activity); |
| 5601 | else |
| 5602 | size += sysfs_emit_at(buf, size, "%3d %16s: %8s %16s %16s ",
| 5603 | i, amdgpu_pp_profile_name[i], "-", "-", "-");
| 5604 | |
| 5605 | if (smu7_profiling[i].bupdate_mclk) |
| 5606 | size += sysfs_emit_at(buf, size, "%16d %16d %16d\n",
| 5607 | smu7_profiling[i].mclk_up_hyst, |
| 5608 | smu7_profiling[i].mclk_down_hyst, |
| 5609 | smu7_profiling[i].mclk_activity); |
| 5610 | else |
| 5611 | size += sysfs_emit_at(buf, size, "%16s %16s %16s\n",
| 5612 | "-", "-", "-");
| 5613 | } |
| 5614 | |
| 5615 | return size; |
| 5616 | } |
| 5617 | |
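|  | /*
|  |  * When entering the COMPUTE profile, restrict sclk DPM to the two
|  |  * highest enabled levels; when leaving it, restore the full enable mask.
|  |  */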
| 5618 | static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr, |
| 5619 | enum PP_SMC_POWER_PROFILE request)
| 5620 | { |
| 5621 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5622 | uint32_t tmp, level; |
| 5623 | |
| 5624 | if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
| 5625 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { |
| 5626 | level = 0; |
| 5627 | tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; |
| 5628 | while (tmp >>= 1) |
| 5629 | level++; |
| 5630 | if (level > 0) |
| 5631 | smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
| 5632 | } |
| 5633 | } else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) { |
| 5634 | smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
| 5635 | } |
| 5636 | } |
| 5637 | |
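|  | /*
|  |  * input[size] selects the profile.  For CUSTOM, input[0..7] optionally
|  |  * supply new sclk/mclk hysteresis and activity settings; for the fixed
|  |  * profiles the stored defaults are applied via smum_update_dpm_settings().
|  |  */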
| 5638 | static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size) |
| 5639 | { |
| 5640 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 5641 | struct profile_mode_setting tmp; |
| 5642 | enum PP_SMC_POWER_PROFILE mode; |
| 5643 | |
| 5644 | if (input == NULL) |
| 5645 | return -EINVAL; |
| 5646 | |
| 5647 | mode = input[size]; |
| 5648 | switch (mode) { |
| 5649 | case PP_SMC_POWER_PROFILE_CUSTOM: |
| 5650 | if (size != 8 && size != 0) |
| 5651 | return -EINVAL; |
| 5652 | /* If only CUSTOM is passed in, use the saved values. Check |
| 5653 | * that we actually have a CUSTOM profile by ensuring that |
| 5654 | * the "use sclk" or the "use mclk" bits are set |
| 5655 | */ |
| 5656 | tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM]; |
| 5657 | if (size == 0) { |
| 5658 | if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0) |
| 5659 | return -EINVAL; |
| 5660 | } else { |
| 5661 | tmp.bupdate_sclk = input[0]; |
| 5662 | tmp.sclk_up_hyst = input[1]; |
| 5663 | tmp.sclk_down_hyst = input[2]; |
| 5664 | tmp.sclk_activity = input[3]; |
| 5665 | tmp.bupdate_mclk = input[4]; |
| 5666 | tmp.mclk_up_hyst = input[5]; |
| 5667 | tmp.mclk_down_hyst = input[6]; |
| 5668 | tmp.mclk_activity = input[7]; |
| 5669 | smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp; |
| 5670 | } |
| 5671 | if (!smum_update_dpm_settings(hwmgr, &tmp)) {
| 5672 | memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting)); |
| 5673 | hwmgr->power_profile_mode = mode; |
| 5674 | } |
| 5675 | break; |
| 5676 | case PP_SMC_POWER_PROFILE_FULLSCREEN3D: |
| 5677 | case PP_SMC_POWER_PROFILE_POWERSAVING: |
| 5678 | case PP_SMC_POWER_PROFILE_VIDEO: |
| 5679 | case PP_SMC_POWER_PROFILE_VR: |
| 5680 | case PP_SMC_POWER_PROFILE_COMPUTE: |
| 5681 | if (mode == hwmgr->power_profile_mode) |
| 5682 | return 0; |
| 5683 | |
| 5684 | memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting)); |
| 5685 | if (!smum_update_dpm_settings(hwmgr, &tmp)) {
| 5686 | if (tmp.bupdate_sclk) { |
| 5687 | data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk; |
| 5688 | data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst; |
| 5689 | data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst; |
| 5690 | data->current_profile_setting.sclk_activity = tmp.sclk_activity; |
| 5691 | } |
| 5692 | if (tmp.bupdate_mclk) { |
| 5693 | data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk; |
| 5694 | data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst; |
| 5695 | data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst; |
| 5696 | data->current_profile_setting.mclk_activity = tmp.mclk_activity; |
| 5697 | } |
| 5698 | smu7_patch_compute_profile_mode(hwmgr, mode);
| 5699 | hwmgr->power_profile_mode = mode; |
| 5700 | } |
| 5701 | break; |
| 5702 | default: |
| 5703 | return -EINVAL; |
| 5704 | } |
| 5705 | |
| 5706 | return 0; |
| 5707 | } |
| 5708 | |
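|  | /*
|  |  * Return the engine and memory clocks of the requested performance
|  |  * level, clamped to the highest level present in the power state.
|  |  */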
| 5709 | static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, |
| 5710 | PHM_PerformanceLevelDesignation designation, uint32_t index, |
| 5711 | PHM_PerformanceLevel *level) |
| 5712 | { |
| 5713 | const struct smu7_power_state *ps; |
| 5714 | uint32_t i; |
| 5715 | |
| 5716 | if (level == NULL || hwmgr == NULL || state == NULL) |
| 5717 | return -EINVAL; |
| 5718 | |
| 5719 | ps = cast_const_phw_smu7_power_state(state);
| 5720 | |
| 5721 | i = index > ps->performance_level_count - 1 ? |
| 5722 | ps->performance_level_count - 1 : index; |
| 5723 | |
| 5724 | level->coreClock = ps->performance_levels[i].engine_clock; |
| 5725 | level->memory_clock = ps->performance_levels[i].memory_clock; |
| 5726 | |
| 5727 | return 0; |
| 5728 | } |
| 5729 | |
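|  | /* Tear down DPM before powering off the ASIC. */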
| 5730 | static int smu7_power_off_asic(struct pp_hwmgr *hwmgr) |
| 5731 | { |
| 5732 | int result; |
| 5733 | |
| 5734 | result = smu7_disable_dpm_tasks(hwmgr); |
| 5735 | PP_ASSERT_WITH_CODE((0 == result), |
| 5736 | "[disable_dpm_tasks] Failed to disable DPM!" , |
| 5737 | ); |
| 5738 | |
| 5739 | return result; |
| 5740 | } |
| 5741 | |
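|  | /* hwmgr callback table shared by the SMU7-based ASICs. */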
| 5742 | static const struct pp_hwmgr_func smu7_hwmgr_funcs = { |
| 5743 | .backend_init = &smu7_hwmgr_backend_init, |
| 5744 | .backend_fini = &smu7_hwmgr_backend_fini, |
| 5745 | .asic_setup = &smu7_setup_asic_task, |
| 5746 | .dynamic_state_management_enable = &smu7_enable_dpm_tasks, |
| 5747 | .apply_state_adjust_rules = smu7_apply_state_adjust_rules, |
| 5748 | .force_dpm_level = &smu7_force_dpm_level, |
| 5749 | .power_state_set = smu7_set_power_state_tasks, |
| 5750 | .get_power_state_size = smu7_get_power_state_size, |
| 5751 | .get_mclk = smu7_dpm_get_mclk, |
| 5752 | .get_sclk = smu7_dpm_get_sclk, |
| 5753 | .patch_boot_state = smu7_dpm_patch_boot_state, |
| 5754 | .get_pp_table_entry = smu7_get_pp_table_entry, |
| 5755 | .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries, |
| 5756 | .powergate_uvd = smu7_powergate_uvd, |
| 5757 | .powergate_vce = smu7_powergate_vce, |
| 5758 | .disable_clock_power_gating = smu7_disable_clock_power_gating, |
| 5759 | .update_clock_gatings = smu7_update_clock_gatings, |
| 5760 | .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment, |
| 5761 | .display_config_changed = smu7_display_configuration_changed_task, |
| 5762 | .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output, |
| 5763 | .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output, |
| 5764 | .stop_thermal_controller = smu7_thermal_stop_thermal_controller, |
| 5765 | .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info, |
| 5766 | .get_fan_speed_pwm = smu7_fan_ctrl_get_fan_speed_pwm, |
| 5767 | .set_fan_speed_pwm = smu7_fan_ctrl_set_fan_speed_pwm, |
| 5768 | .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default, |
| 5769 | .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm, |
| 5770 | .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm, |
| 5771 | .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller, |
| 5772 | .register_irq_handlers = smu7_register_irq_handlers, |
| 5773 | .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration, |
| 5774 | .check_states_equal = smu7_check_states_equal, |
| 5775 | .set_fan_control_mode = smu7_set_fan_control_mode, |
| 5776 | .get_fan_control_mode = smu7_get_fan_control_mode, |
| 5777 | .force_clock_level = smu7_force_clock_level, |
| 5778 | .print_clock_levels = smu7_print_clock_levels, |
| 5779 | .powergate_gfx = smu7_powergate_gfx, |
| 5780 | .get_sclk_od = smu7_get_sclk_od, |
| 5781 | .set_sclk_od = smu7_set_sclk_od, |
| 5782 | .get_mclk_od = smu7_get_mclk_od, |
| 5783 | .set_mclk_od = smu7_set_mclk_od, |
| 5784 | .get_clock_by_type = smu7_get_clock_by_type, |
| 5785 | .get_clock_by_type_with_latency = smu7_get_clock_by_type_with_latency, |
| 5786 | .set_watermarks_for_clocks_ranges = smu7_set_watermarks_for_clocks_ranges, |
| 5787 | .read_sensor = smu7_read_sensor, |
| 5788 | .dynamic_state_management_disable = smu7_disable_dpm_tasks, |
| 5789 | .avfs_control = smu7_avfs_control, |
| 5790 | .disable_smc_firmware_ctf = smu7_thermal_disable_alert, |
| 5791 | .start_thermal_controller = smu7_start_thermal_controller, |
| 5792 | .notify_cac_buffer_info = smu7_notify_cac_buffer_info, |
| 5793 | .get_max_high_clocks = smu7_get_max_high_clocks, |
| 5794 | .get_thermal_temperature_range = smu7_get_thermal_temperature_range, |
| 5795 | .odn_edit_dpm_table = smu7_odn_edit_dpm_table, |
| 5796 | .set_power_limit = smu7_set_power_limit, |
| 5797 | .get_power_profile_mode = smu7_get_power_profile_mode, |
| 5798 | .set_power_profile_mode = smu7_set_power_profile_mode, |
| 5799 | .get_performance_level = smu7_get_performance_level, |
| 5800 | .get_bamaco_support = smu7_get_bamaco_support, |
| 5801 | .get_asic_baco_state = smu7_baco_get_state, |
| 5802 | .set_asic_baco_state = smu7_baco_set_state, |
| 5803 | .power_off_asic = smu7_power_off_asic, |
| 5804 | }; |
| 5805 | |
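|  | /*
|  |  * Pick the largest deep-sleep divider ID such that (clock >> id) still
|  |  * meets the requested floor (clock_insr, but never below
|  |  * SMU7_MINIMUM_ENGINE_CLOCK).
|  |  */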
| 5806 | uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, |
| 5807 | uint32_t clock_insr) |
| 5808 | { |
| 5809 | uint8_t i; |
| 5810 | uint32_t temp; |
| 5811 | uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK); |
| 5812 | |
| 5813 | PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
| 5814 | for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { |
| 5815 | temp = clock >> i; |
| 5816 | |
| 5817 | if (temp >= min || i == 0) |
| 5818 | break; |
| 5819 | } |
| 5820 | return i; |
| 5821 | } |
| 5822 | |
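|  | /*
|  |  * Hook up the SMU7 hwmgr callbacks and the pptable parser matching the
|  |  * powerplay table version.
|  |  */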
| 5823 | int smu7_init_function_pointers(struct pp_hwmgr *hwmgr) |
| 5824 | { |
| 5825 | hwmgr->hwmgr_func = &smu7_hwmgr_funcs; |
| 5826 | if (hwmgr->pp_table_version == PP_TABLE_V0) |
| 5827 | hwmgr->pptable_func = &pptable_funcs; |
| 5828 | else if (hwmgr->pp_table_version == PP_TABLE_V1) |
| 5829 | hwmgr->pptable_func = &pptable_v1_0_funcs; |
| 5830 | |
| 5831 | return 0; |
| 5832 | } |
| 5833 | |