// SPDX-License-Identifier: MIT
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
#include <linux/string.h>
#include <linux/acpi.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"

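/*
 * Push the current DC display configuration down to DPM: copy the relevant
 * fields of @pp_display_cfg into adev->pm.pm_display_cfg (clock values are
 * converted from kHz to the 10 kHz units DPM expects), notify the power
 * code of the configuration change, and let it recompute clocks.
 */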
bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int i;

	if (adev->pm.dpm_enabled) {

		memset(&adev->pm.pm_display_cfg, 0,
		       sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz/10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz/10;

		adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_dcef_set_clk =
				pp_display_cfg->min_dcfclock_khz/10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz/10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

		for (i = 0; i < pp_display_cfg->display_count; i++) {
			const struct dm_pp_single_disp_config *dc_cfg =
						&pp_display_cfg->disp_configs[i];
			adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
			adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock;
		}

		amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);

		amdgpu_dpm_compute_clocks(adev);
	}

	return true;
}

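/*
 * Fallback clock tables used when the pplib query fails: fixed default
 * display, engine and memory clock levels in kHz.
 */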
static void get_default_clock_levels(
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *clks)
{
	uint32_t disp_clks_in_khz[6] = {
			300000, 400000, 496560, 626090, 685720, 757900 };
	uint32_t sclks_in_khz[6] = {
			300000, 360000, 423530, 514290, 626090, 720000 };
	uint32_t mclks_in_khz[2] = { 333000, 800000 };

	switch (clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, disp_clks_in_khz,
				sizeof(disp_clks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, sclks_in_khz,
				sizeof(sclks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		clks->num_levels = 2;
		memmove(clks->clocks_in_khz, mclks_in_khz,
				sizeof(mclks_in_khz));
		break;
	default:
		clks->num_levels = 0;
		break;
	}
}

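/* Translate a DM clock type into the corresponding powerplay clock type. */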
static enum amd_pp_clock_type dc_to_pp_clock_type(
		enum dm_pp_clock_type dm_pp_clk_type)
{
	enum amd_pp_clock_type amd_pp_clk_type = 0;

	switch (dm_pp_clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		amd_pp_clk_type = amd_pp_disp_clock;
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		amd_pp_clk_type = amd_pp_sys_clock;
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		amd_pp_clk_type = amd_pp_mem_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCEFCLK:
		amd_pp_clk_type = amd_pp_dcef_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCFCLK:
		amd_pp_clk_type = amd_pp_dcf_clock;
		break;
	case DM_PP_CLOCK_TYPE_PIXELCLK:
		amd_pp_clk_type = amd_pp_pixel_clock;
		break;
	case DM_PP_CLOCK_TYPE_FCLK:
		amd_pp_clk_type = amd_pp_f_clock;
		break;
	case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
		amd_pp_clk_type = amd_pp_phy_clock;
		break;
	case DM_PP_CLOCK_TYPE_DPPCLK:
		amd_pp_clk_type = amd_pp_dpp_clock;
		break;
	default:
		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
				dm_pp_clk_type);
		break;
	}

	return amd_pp_clk_type;
}

static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
		enum PP_DAL_POWERLEVEL max_clocks_state)
{
	switch (max_clocks_state) {
	case PP_DAL_POWERLEVEL_0:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
	case PP_DAL_POWERLEVEL_1:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
	case PP_DAL_POWERLEVEL_2:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
	case PP_DAL_POWERLEVEL_3:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
	case PP_DAL_POWERLEVEL_4:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
	case PP_DAL_POWERLEVEL_5:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
	case PP_DAL_POWERLEVEL_6:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
	case PP_DAL_POWERLEVEL_7:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
	default:
		DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
				max_clocks_state);
		return DM_PP_CLOCKS_STATE_INVALID;
	}
}

static void pp_to_dc_clock_levels(
		const struct amd_pp_clocks *pp_clks,
		struct dm_pp_clock_levels *dc_clks,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->count,
				DM_PP_MAX_CLOCK_LEVELS);

		dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		dc_clks->num_levels = pp_clks->count;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < dc_clks->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
		dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
	}
}

static void pp_to_dc_clock_levels_with_latency(
		const struct pp_clock_levels_with_latency *pp_clks,
		struct dm_pp_clock_levels_with_latency *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_DEBUG("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
	}
}

static void pp_to_dc_clock_levels_with_voltage(
		const struct pp_clock_levels_with_voltage *pp_clks,
		struct dm_pp_clock_levels_with_voltage *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz,
			 pp_clks->data[i].voltage_in_mv);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
	}
}

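/*
 * Query the clock levels for @clk_type from pplib, falling back to the
 * default tables on error. For engine and memory clocks the level count is
 * then clamped at the highest level that does not exceed the validation
 * (non-boosted) clocks reported by pplib.
 */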
bool dm_pp_get_clock_levels_by_type(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (amdgpu_dpm_get_clock_by_type(adev,
			dc_to_pp_clock_type(clk_type), &pp_clks)) {
		/* Error in pplib. Provide default values. */
		get_default_clock_levels(clk_type, dc_clks);
		return true;
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	if (amdgpu_dpm_get_display_mode_validation_clks(adev, &validation_clks)) {
		/* Error in pplib. Provide default values. */
		DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
		validation_clks.engine_max_clock = 72000;
		validation_clks.memory_max_clock = 80000;
		validation_clks.level = 0;
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB: level : %d\n",
			validation_clks.level);

	/* Translate 10 kHz to kHz. */
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

	/* Determine the highest non-boosted level from the validation clocks */
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
				/* This clock is higher than the validation
				 * clock. That means the previous one is the
				 * highest non-boosted one.
				 */
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	}

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_latency(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_latency *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_clock_levels_with_latency pp_clks = { 0 };
	int ret;

	ret = amdgpu_dpm_get_clock_by_type_with_latency(adev,
					dc_to_pp_clock_type(clk_type),
					&pp_clks);
	if (ret)
		return false;

	pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_voltage(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_clock_levels_with_voltage pp_clk_info = {0};
	int ret;

	ret = amdgpu_dpm_get_clock_by_type_with_voltage(adev,
					dc_to_pp_clock_type(clk_type),
					&pp_clk_info);
	if (ret)
		return false;

	pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);

	return true;
}

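/*
 * Forward watermark clock ranges to DPM. Currently limited to Polaris-family
 * ASICs (CHIP_POLARIS10 through CHIP_VEGAM); returns true on success.
 */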
bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	struct amdgpu_device *adev = ctx->driver_context;

	/*
	 * Limit this watermark setting for Polaris for now
	 * TODO: expand this to other ASICs
	 */
	if ((adev->asic_type >= CHIP_POLARIS10) &&
	    (adev->asic_type <= CHIP_VEGAM) &&
	    !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
						(void *)wm_with_clock_ranges))
		return true;

	return false;
}

bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_apply_clock_for_voltage_request(
	const struct dc_context *ctx,
	struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request pp_clock_request = {0};
	int ret = 0;

	pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
	pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;

	if (!pp_clock_request.clock_type)
		return false;

	ret = amdgpu_dpm_display_clock_voltage_request(adev, &pp_clock_request);
	if (ret && (ret != -EOPNOTSUPP))
		return false;

	return true;
}

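/*
 * Report the current maximum clocks state plus the maximum memory and engine
 * clocks (converted from 10 kHz units to kHz) from the powerplay clock info.
 */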
bool dm_pp_get_static_clocks(
	const struct dc_context *ctx,
	struct dm_pp_static_clock_info *static_clk_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amd_pp_clock_info pp_clk_info = {0};

	if (amdgpu_dpm_get_current_clocks(adev, &pp_clk_info))
		return false;

	static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
	static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
	static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;

	return true;
}

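/*
 * Raven-family pp_smu callbacks.
 *
 * pp_rv_set_wm_ranges() repacks the reader/writer watermark sets (given in
 * MHz) into the SoC15 DMIF/MCIF clock-range structures (in kHz) that the
 * DPM watermark interface expects.
 */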
static void pp_rv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
			wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
			wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
						    &wm_with_clock_ranges);
}

static void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_notify_smu_enable_pwe(adev);
}

static void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_active_display_count(adev, count);
}

static void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, clock);
}

static void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_hard_min_dcefclk_by_freq(adev, clock);
}

static void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_hard_min_fclk_by_freq(adev, mhz);
}

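/*
 * Navi (NV) and Renoir (RN) pp_smu callbacks. Most of them wrap an
 * amdgpu_dpm call and translate its return code: -EOPNOTSUPP becomes
 * PP_SMU_RESULT_UNSUPPORTED, any other error PP_SMU_RESULT_FAIL.
 */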
static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges);

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_set_active_display_count(adev, count);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		/* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status
pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	/* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL; 1: fail */
	ret = amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, mhz);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
		struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request clock_req;
	int ret = 0;

	clock_req.clock_type = amd_pp_dcef_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
	 * 1: fail
	 */
	ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status
pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request clock_req;
	int ret = 0;

	clock_req.clock_type = amd_pp_mem_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
	 * 1: fail
	 */
	ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_pstate_handshake_support(
		struct pp_smu *pp, bool pstate_handshake_supported)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	if (amdgpu_dpm_display_disable_memory_clock_switch(adev,
							   !pstate_handshake_supported))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
		enum pp_smu_nv_clock_id clock_id, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request clock_req;
	int ret = 0;

	switch (clock_id) {
	case PP_SMU_NV_DISPCLK:
		clock_req.clock_type = amd_pp_disp_clock;
		break;
	case PP_SMU_NV_PHYCLK:
		clock_req.clock_type = amd_pp_phy_clock;
		break;
	case PP_SMU_NV_PIXELCLK:
		clock_req.clock_type = amd_pp_pixel_clock;
		break;
	default:
		break;
	}
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
	 * 1: fail
	 */
	ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
		struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_get_max_sustainable_clocks_by_dc(adev,
							  max_clocks);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
		unsigned int *clock_values_in_khz, unsigned int *num_states)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_get_uclk_dpm_states(adev,
					     clock_values_in_khz,
					     num_states);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_rn_get_dpm_clock_table(
		struct pp_smu *pp, struct dpm_clocks *clock_table)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_get_dpm_clock_table(adev, clock_table);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges);

	return PP_SMU_RESULT_OK;
}

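/*
 * Fill in the pp_smu function table according to the DCN version:
 * Raven (DCN 1.x), Navi (DCN 2.0) or Renoir (DCN 2.1) callbacks.
 */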
void dm_pp_get_funcs(
		struct dc_context *ctx,
		struct pp_smu_funcs *funcs)
{
	switch (ctx->dce_version) {
	case DCN_VERSION_1_0:
	case DCN_VERSION_1_01:
		funcs->ctx.ver = PP_SMU_VER_RV;
		funcs->rv_funcs.pp_smu.dm = ctx;
		funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
		funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
		funcs->rv_funcs.set_display_count =
				pp_rv_set_active_display_count;
		funcs->rv_funcs.set_min_deep_sleep_dcfclk =
				pp_rv_set_min_deep_sleep_dcfclk;
		funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
				pp_rv_set_hard_min_dcefclk_by_freq;
		funcs->rv_funcs.set_hard_min_fclk_by_freq =
				pp_rv_set_hard_min_fclk_by_freq;
		break;
	case DCN_VERSION_2_0:
		funcs->ctx.ver = PP_SMU_VER_NV;
		funcs->nv_funcs.pp_smu.dm = ctx;
		funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
		funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
				pp_nv_set_hard_min_dcefclk_by_freq;
		funcs->nv_funcs.set_min_deep_sleep_dcfclk =
				pp_nv_set_min_deep_sleep_dcfclk;
		funcs->nv_funcs.set_voltage_by_freq =
				pp_nv_set_voltage_by_freq;
		funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;

		/* todo: set_pme_wa_enable causes a 4k@60Hz display to not light up */
		funcs->nv_funcs.set_pme_wa_enable = NULL;
		/* todo: debug warning message */
		funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;
		/* todo: compare data with the Windows driver */
		funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
		/* todo: compare data with the Windows driver */
		funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
		funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
		break;

	case DCN_VERSION_2_1:
		funcs->ctx.ver = PP_SMU_VER_RN;
		funcs->rn_funcs.pp_smu.dm = ctx;
		funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
		funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
		break;
	default:
		DRM_ERROR("smu version is not supported!\n");
		break;
	}
}