// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/bitops.h>

#include <drm/drm_print.h>

#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_jiffies.h"
#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_utils.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
#include "skl_watermark.h"
struct pmdemand_params {
	u16 qclk_gv_bw;
	u8 voltage_index;
	u8 qclk_gv_index;
	u8 active_pipes;
	u8 active_dbufs; /* pre-Xe3 only */
	/* Total number of active non-type-C PHYs from active_combo_phys_mask */
	u8 active_phys;
	u8 plls;
	u16 cdclk_freq_mhz;
	/* max from ddi_clocks[] */
	u16 ddiclk_max;
	u8 scalers; /* pre-Xe3 only */
};

struct intel_pmdemand_state {
	struct intel_global_state base;

	/* Maintain a persistent list of port clocks across all crtcs */
	int ddi_clocks[I915_MAX_PIPES];

	/* Maintain a persistent mask of the active non-type-C PHYs */
	u16 active_combo_phys_mask;

	/* Parameters to be configured in the pmdemand registers */
	struct pmdemand_params params;
};

struct intel_pmdemand_state *to_intel_pmdemand_state(struct intel_global_state *obj_state)
{
	return container_of(obj_state, struct intel_pmdemand_state, base);
}

static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return NULL;

	return &pmdemand_state->base;
}

static void intel_pmdemand_destroy_state(struct intel_global_obj *obj,
					 struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_pmdemand_funcs = {
	.atomic_duplicate_state = intel_pmdemand_duplicate_state,
	.atomic_destroy_state = intel_pmdemand_destroy_state,
};
| 77 | |
| 78 | static struct intel_pmdemand_state * |
| 79 | intel_atomic_get_pmdemand_state(struct intel_atomic_state *state) |
| 80 | { |
| 81 | struct intel_display *display = to_intel_display(state); |
| 82 | struct intel_global_state *pmdemand_state = |
| 83 | intel_atomic_get_global_obj_state(state, |
| 84 | obj: &display->pmdemand.obj); |
| 85 | |
| 86 | if (IS_ERR(ptr: pmdemand_state)) |
| 87 | return ERR_CAST(ptr: pmdemand_state); |
| 88 | |
| 89 | return to_intel_pmdemand_state(obj_state: pmdemand_state); |
| 90 | } |

static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_old_global_obj_state(state,
						      &display->pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_new_global_obj_state(state,
						      &display->pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

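/*
 * Set up the pmdemand global state object. On display version 14.00
 * steppings before C0 this also applies Wa_14016740474 by setting the
 * response-timeout-disable chicken bit. Returns 0 on success or -ENOMEM
 * if the initial state cannot be allocated.
 */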
int intel_pmdemand_init(struct intel_display *display)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(display, &display->pmdemand.obj,
				     &pmdemand_state->base,
				     &intel_pmdemand_funcs);

	if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0))
		/* Wa_14016740474 */
		intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);

	return 0;
}

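/* Set up the pmdemand mutex and request wait queue early in driver init. */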
void intel_pmdemand_init_early(struct intel_display *display)
{
	mutex_init(&display->pmdemand.lock);
	init_waitqueue_head(&display->pmdemand.waitqueue);
}

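/*
 * Set or clear the given encoder's PHY bit in active_combo_phys_mask.
 * Type-C PHYs are not tracked by pmdemand, so encoders on them are
 * ignored, as is everything before display version 14.
 */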
void
intel_pmdemand_update_phys_mask(struct intel_display *display,
				struct intel_encoder *encoder,
				struct intel_pmdemand_state *pmdemand_state,
				bool set_bit)
{
	enum phy phy;

	if (DISPLAY_VER(display) < 14)
		return;

	if (!encoder)
		return;

	if (intel_encoder_is_tc(encoder))
		return;

	phy = intel_encoder_to_phy(encoder);

	if (set_bit)
		pmdemand_state->active_combo_phys_mask |= BIT(phy);
	else
		pmdemand_state->active_combo_phys_mask &= ~BIT(phy);
}

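/*
 * Record the latest port clock for the given pipe; the maximum across all
 * pipes feeds params.ddiclk_max in intel_pmdemand_update_max_ddiclk().
 */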
void
intel_pmdemand_update_port_clock(struct intel_display *display,
				 struct intel_pmdemand_state *pmdemand_state,
				 enum pipe pipe, int port_clock)
{
	if (DISPLAY_VER(display) < 14)
		return;

	pmdemand_state->ddi_clocks[pipe] = port_clock;
}

static void
intel_pmdemand_update_max_ddiclk(struct intel_display *display,
				 struct intel_atomic_state *state,
				 struct intel_pmdemand_state *pmdemand_state)
{
	int max_ddiclk = 0;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_pmdemand_update_port_clock(display, pmdemand_state,
						 crtc->pipe,
						 new_crtc_state->port_clock);

	for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++)
		max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk);

	pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000);
}

static void
intel_pmdemand_update_connector_phys(struct intel_display *display,
				     struct intel_atomic_state *state,
				     struct drm_connector_state *conn_state,
				     bool set_bit,
				     struct intel_pmdemand_state *pmdemand_state)
{
	struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
	struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
	struct intel_crtc_state *crtc_state;

	if (!crtc)
		return;

	if (set_bit)
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	else
		crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!crtc_state->hw.active)
		return;

	intel_pmdemand_update_phys_mask(display, encoder, pmdemand_state,
					set_bit);
}

static void
intel_pmdemand_update_active_non_tc_phys(struct intel_display *display,
					 struct intel_atomic_state *state,
					 struct intel_pmdemand_state *pmdemand_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		if (!intel_connector_needs_modeset(state, connector))
			continue;

		/* First clear the active phys in the old connector state */
		intel_pmdemand_update_connector_phys(display, state,
						     old_conn_state, false,
						     pmdemand_state);

		/* Then set the active phys in the new connector state */
		intel_pmdemand_update_connector_phys(display, state,
						     new_conn_state, true,
						     pmdemand_state);
	}

	pmdemand_state->params.active_phys =
		min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask),
		      7);
}

static bool
intel_pmdemand_encoder_has_tc_phy(struct intel_display *display,
				  struct intel_encoder *encoder)
{
	return encoder && intel_encoder_is_tc(encoder);
}

static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		struct intel_encoder *old_encoder =
			to_intel_encoder(old_conn_state->best_encoder);
		struct intel_encoder *new_encoder =
			to_intel_encoder(new_conn_state->best_encoder);

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		if (old_encoder == new_encoder ||
		    (intel_pmdemand_encoder_has_tc_phy(display, old_encoder) &&
		     intel_pmdemand_encoder_has_tc_phy(display, new_encoder)))
			continue;

		return true;
	}

	return false;
}

static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	if (intel_bw_pmdemand_needs_update(state))
		return true;

	if (intel_dbuf_pmdemand_needs_update(state))
		return true;

	if (intel_cdclk_pmdemand_needs_update(state))
		return true;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		if (new_crtc_state->port_clock != old_crtc_state->port_clock)
			return true;

	return intel_pmdemand_connector_needs_update(state);
}

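/*
 * Derive the new pmdemand parameters from the bandwidth, dbuf and cdclk
 * global states, then either serialize the global state (when a modeset is
 * allowed) or just lock it (fastsets), so the commit phase can program the
 * parameters later.
 */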
int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *new_bw_state;
	const struct intel_cdclk_state *new_cdclk_state;
	const struct intel_dbuf_state *new_dbuf_state;
	struct intel_pmdemand_state *new_pmdemand_state;

	if (DISPLAY_VER(display) < 14)
		return 0;

	if (!intel_pmdemand_needs_update(state))
		return 0;

	new_pmdemand_state = intel_atomic_get_pmdemand_state(state);
	if (IS_ERR(new_pmdemand_state))
		return PTR_ERR(new_pmdemand_state);

	new_bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(new_bw_state))
		return PTR_ERR(new_bw_state);

	/* firmware will calculate the qclk_gv_index, so the requirement is set to 0 */
	new_pmdemand_state->params.qclk_gv_index = 0;
	new_pmdemand_state->params.qclk_gv_bw = intel_bw_qgv_point_peakbw(new_bw_state);

	new_dbuf_state = intel_atomic_get_dbuf_state(state);
	if (IS_ERR(new_dbuf_state))
		return PTR_ERR(new_dbuf_state);

	if (DISPLAY_VER(display) < 30) {
		new_pmdemand_state->params.active_dbufs =
			min_t(u8, intel_dbuf_num_enabled_slices(new_dbuf_state), 3);
		new_pmdemand_state->params.active_pipes =
			min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), 3);
	} else {
		new_pmdemand_state->params.active_pipes =
			min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), INTEL_NUM_PIPES(display));
	}

	new_cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(new_cdclk_state))
		return PTR_ERR(new_cdclk_state);

	new_pmdemand_state->params.voltage_index =
		intel_cdclk_actual_voltage_level(new_cdclk_state);
	new_pmdemand_state->params.cdclk_freq_mhz =
		DIV_ROUND_UP(intel_cdclk_actual(new_cdclk_state), 1000);

	intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state);

	intel_pmdemand_update_active_non_tc_phys(display, state, new_pmdemand_state);

	/*
	 * Active_PLLs starts at 1 because of the CDCLK PLL.
	 * TODO: Also account for the genlock filter once it gets used.
	 */
	new_pmdemand_state->params.plls =
		min_t(u16, new_pmdemand_state->params.active_phys + 1, 7);

	/*
	 * Set scalers to the max, since this can't be calculated during flips
	 * and fastsets without taking the global state locks.
	 */
	new_pmdemand_state->params.scalers = 7;

	if (state->base.allow_modeset)
		return intel_atomic_serialize_global_state(&new_pmdemand_state->base);
	else
		return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}

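/*
 * Check that any previous pmdemand transaction has completed: both the
 * request-enable bit and the in-flight status must clear within 10 ms.
 */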
static bool intel_pmdemand_check_prev_transaction(struct intel_display *display)
{
	return !(intel_de_wait_for_clear_ms(display,
					    XELPDP_INITIATE_PMDEMAND_REQUEST(1),
					    XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
		 intel_de_wait_for_clear_ms(display,
					    GEN12_DCPR_STATUS_1,
					    XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}

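/*
 * Seed the software pmdemand parameters from the values currently programmed
 * in the hardware request registers. If a previous request is unexpectedly
 * still pending, the parameters are zeroed instead.
 */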
void
intel_pmdemand_init_pmdemand_params(struct intel_display *display,
				    struct intel_pmdemand_state *pmdemand_state)
{
	u32 reg1, reg2;

	if (DISPLAY_VER(display) < 14)
		return;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display))) {
		memset(&pmdemand_state->params, 0,
		       sizeof(pmdemand_state->params));
		goto unlock;
	}

	reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));

	reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));

	pmdemand_state->params.qclk_gv_bw =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
	pmdemand_state->params.voltage_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
	pmdemand_state->params.qclk_gv_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
	pmdemand_state->params.active_phys =
		REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);

	pmdemand_state->params.cdclk_freq_mhz =
		REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
	pmdemand_state->params.ddiclk_max =
		REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);

	if (DISPLAY_VER(display) >= 30) {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
	} else {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
		pmdemand_state->params.active_dbufs =
			REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);

		pmdemand_state->params.scalers =
			REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
	}

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static bool intel_pmdemand_req_complete(struct intel_display *display)
{
	return !(intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
		 XELPDP_PMDEMAND_REQ_ENABLE);
}

static void intel_pmdemand_poll(struct intel_display *display)
{
	const unsigned int timeout_ms = 10;
	u32 status;
	int ret;

	ret = intel_de_wait_ms(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
			       XELPDP_PMDEMAND_REQ_ENABLE, 0,
			       timeout_ms, &status);

	if (ret == -ETIMEDOUT)
		drm_err(display->drm,
			"timed out waiting for Punit PM Demand Response within %ums (status 0x%08x)\n",
			timeout_ms, status);
}

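/*
 * Wait for the pending pmdemand request to complete, either by polling the
 * request register (Wa_14024400148, display version 20) or by sleeping on
 * the interrupt-driven wait queue.
 */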
static void intel_pmdemand_wait(struct intel_display *display)
{
	/* Wa_14024400148: use the polling method on LNL */
	if (DISPLAY_VER(display) == 20) {
		intel_pmdemand_poll(display);
	} else {
		if (!wait_event_timeout(display->pmdemand.waitqueue,
					intel_pmdemand_req_complete(display),
					msecs_to_jiffies_timeout(10)))
			drm_err(display->drm,
				"timed out waiting for Punit PM Demand Response\n");
	}
}

/* Required to be programmed during Display Init Sequences. */
void intel_pmdemand_program_dbuf(struct intel_display *display,
				 u8 dbuf_slices)
{
	u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);

	/* PM Demand only tracks active dbufs on pre-Xe3 platforms */
	if (DISPLAY_VER(display) >= 30)
		return;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display)))
		goto unlock;

	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
		     XELPDP_PMDEMAND_DBUFS_MASK,
		     REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(display);

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static void
intel_pmdemand_update_params(struct intel_display *display,
			     const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old,
			     u32 *reg1, u32 *reg2, bool serialized)
{
	/*
	 * The pmdemand parameter updates happen in two steps: pre-plane and
	 * post-plane. During the pre-plane step, as the display engine might
	 * still be processing some old operations, program the pmdemand
	 * parameters with the higher of the old and new values to avoid
	 * unexpected performance issues. Then, once things have settled, use
	 * the new parameter values as part of the post-plane update.
	 *
	 * If the pmdemand params update happens without a modeset allowed, the
	 * updates can't be serialized, which implies that parallel atomic
	 * commits may affect the pmdemand parameters. In that case, the
	 * current values in the registers must be considered as well: in the
	 * pre-plane case, take the max of the old, new and current register
	 * values if not serialized; in the post-plane update, take the max of
	 * the new and current register values if not serialized.
	 */

#define update_reg(reg, field, mask) do { \
	u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \
	u32 old_val = old ? old->params.field : 0; \
	u32 new_val = new->params.field; \
\
	*(reg) &= ~(mask); \
	*(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \
} while (0)

	/* Set 1 */
	update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
	update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
	update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
	update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);

	/* Set 2 */
	update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
	update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
	update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);

	if (DISPLAY_VER(display) >= 30) {
		update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK);
	} else {
		update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
		update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);

		update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
	}

#undef update_reg
}

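/*
 * Read-modify-write both pmdemand request registers and, only if something
 * actually changed, set the request-enable bit and wait for the Punit to
 * acknowledge the new parameters.
 */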
static void
intel_pmdemand_program_params(struct intel_display *display,
			      const struct intel_pmdemand_state *new,
			      const struct intel_pmdemand_state *old,
			      bool serialized)
{
	bool changed = false;
	u32 reg1, mod_reg1;
	u32 reg2, mod_reg2;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display)))
		goto unlock;

	reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
	mod_reg1 = reg1;

	reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
	mod_reg2 = reg2;

	intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2,
				     serialized);

	if (reg1 != mod_reg1) {
		intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
			       mod_reg1);
		changed = true;
	}

	if (reg2 != mod_reg2) {
		intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
			       mod_reg2);
		changed = true;
	}

	/* Initiate pm demand request only if register values are changed */
	if (!changed)
		goto unlock;

	drm_dbg_kms(display->drm,
		    "initiate pmdemand request values: (0x%x 0x%x)\n",
		    mod_reg1, mod_reg2);

	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(display);

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static bool
intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old)
{
	return memcmp(&new->params, &old->params, sizeof(new->params)) != 0;
}

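/*
 * Pre-plane-update step: program the pmdemand parameters using the max of
 * the old and new values (see intel_pmdemand_update_params()), so the
 * reported demand is not lowered while the update is still in flight.
 */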
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(display) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(display, new_pmdemand_state,
				      old_pmdemand_state,
				      intel_atomic_global_state_is_serialized(state));
}

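/*
 * Post-plane-update step: program the final new parameters. Passing NULL
 * for the old state means only the new (and, if not serialized, current
 * register) values are considered.
 */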
void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(display) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(display, new_pmdemand_state, NULL,
				      intel_atomic_global_state_is_serialized(state));
}