1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2022 Intel Corporation
4 */
5
6#include <linux/iopoll.h>
7
8#include <drm/drm_print.h>
9
10#include "i915_drv.h"
11#include "i915_irq.h"
12#include "i915_reg.h"
13#include "intel_backlight_regs.h"
14#include "intel_combo_phy.h"
15#include "intel_combo_phy_regs.h"
16#include "intel_crt.h"
17#include "intel_de.h"
18#include "intel_display_irq.h"
19#include "intel_display_power_well.h"
20#include "intel_display_regs.h"
21#include "intel_display_rpm.h"
22#include "intel_display_types.h"
23#include "intel_dkl_phy.h"
24#include "intel_dkl_phy_regs.h"
25#include "intel_dmc.h"
26#include "intel_dmc_wl.h"
27#include "intel_dp_aux_regs.h"
28#include "intel_dpio_phy.h"
29#include "intel_dpll.h"
30#include "intel_hotplug.h"
31#include "intel_pcode.h"
32#include "intel_pps.h"
33#include "intel_psr.h"
34#include "intel_tc.h"
35#include "intel_vga.h"
36#include "skl_watermark.h"
37#include "vlv_dpio_phy_regs.h"
38#include "vlv_iosf_sb_reg.h"
39#include "vlv_sideband.h"
40
41/*
42 * PG0 is HW controlled, so doesn't have a corresponding power well control knob
43 *
44 * {ICL,SKL}_DISP_PW1_IDX..{ICL,SKL}_DISP_PW4_IDX -> PG1..PG4
45 */
46static enum skl_power_gate pw_idx_to_pg(struct intel_display *display, int pw_idx)
47{
48 int pw1_idx = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_PW_1 : SKL_PW_CTL_IDX_PW_1;
49
50 return pw_idx - pw1_idx + SKL_PG1;
51}
52
/*
 * Request/status register set for a hsw-style power well. Each of the four
 * registers carries a request bit for the well; the well is kept enabled
 * as long as any of the requesters (BIOS, driver, KVMr, debug) asks for it.
 */
struct i915_power_well_regs {
	i915_reg_t bios;
	i915_reg_t driver;
	i915_reg_t kvmr;
	i915_reg_t debug;
};
59
struct i915_power_well_ops {
	/* Request/status registers used by the hsw-style helpers, if any. */
	const struct i915_power_well_regs *regs;
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct intel_display *display,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct intel_display *display,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct intel_display *display,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct intel_display *display,
			   struct i915_power_well *power_well);
};
87
/* Return the per-platform instance data of the given power well. */
static const struct i915_power_well_instance *
i915_power_well_instance(const struct i915_power_well *power_well)
{
	return &power_well->desc->instances->list[power_well->instance_idx];
}
93
94struct i915_power_well *
95lookup_power_well(struct intel_display *display,
96 enum i915_power_well_id power_well_id)
97{
98 struct i915_power_well *power_well;
99
100 for_each_power_well(display, power_well)
101 if (i915_power_well_instance(power_well)->id == power_well_id)
102 return power_well;
103
104 /*
105 * It's not feasible to add error checking code to the callers since
106 * this condition really shouldn't happen and it doesn't even make sense
107 * to abort things like display initialization sequences. Just return
108 * the first power well and hope the WARN gets reported so we can fix
109 * our driver.
110 */
111 drm_WARN(display->drm, 1,
112 "Power well %d not defined for this platform\n",
113 power_well_id);
114 return &display->power.domains.power_wells[0];
115}
116
117void intel_power_well_enable(struct intel_display *display,
118 struct i915_power_well *power_well)
119{
120 drm_dbg_kms(display->drm, "enabling %s\n", intel_power_well_name(power_well));
121 power_well->desc->ops->enable(display, power_well);
122 power_well->hw_enabled = true;
123}
124
125void intel_power_well_disable(struct intel_display *display,
126 struct i915_power_well *power_well)
127{
128 drm_dbg_kms(display->drm, "disabling %s\n", intel_power_well_name(power_well));
129 power_well->hw_enabled = false;
130 power_well->desc->ops->disable(display, power_well);
131}
132
133void intel_power_well_sync_hw(struct intel_display *display,
134 struct i915_power_well *power_well)
135{
136 power_well->desc->ops->sync_hw(display, power_well);
137 power_well->hw_enabled = power_well->desc->ops->is_enabled(display, power_well);
138}
139
140void intel_power_well_get(struct intel_display *display,
141 struct i915_power_well *power_well)
142{
143 if (!power_well->count++)
144 intel_power_well_enable(display, power_well);
145}
146
147void intel_power_well_put(struct intel_display *display,
148 struct i915_power_well *power_well)
149{
150 drm_WARN(display->drm, !power_well->count,
151 "Use count on power well %s is already zero",
152 i915_power_well_instance(power_well)->name);
153
154 if (!--power_well->count)
155 intel_power_well_disable(display, power_well);
156}
157
158bool intel_power_well_is_enabled(struct intel_display *display,
159 struct i915_power_well *power_well)
160{
161 return power_well->desc->ops->is_enabled(display, power_well);
162}
163
/* Return the cached (SW tracked) enabled state without touching the HW. */
bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
{
	return power_well->hw_enabled;
}
168
169bool intel_display_power_well_is_enabled(struct intel_display *display,
170 enum i915_power_well_id power_well_id)
171{
172 struct i915_power_well *power_well;
173
174 power_well = lookup_power_well(display, power_well_id);
175
176 return intel_power_well_is_enabled(display, power_well);
177}
178
/* Return whether the power well is marked always-on in its descriptor. */
bool intel_power_well_is_always_on(struct i915_power_well *power_well)
{
	return power_well->desc->always_on;
}
183
/* Return the name of this power well instance. */
const char *intel_power_well_name(struct i915_power_well *power_well)
{
	return i915_power_well_instance(power_well)->name;
}
188
/* Return the mask of power domains backed by this power well. */
struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well)
{
	return &power_well->domains;
}
193
/* Return the current reference count of the power well. */
int intel_power_well_refcount(struct i915_power_well *power_well)
{
	return power_well->count;
}
198
199/*
200 * Starting with Haswell, we have a "Power Down Well" that can be turned off
201 * when not needed anymore. We have 4 registers that can request the power well
202 * to be enabled, and it will only be disabled if none of the registers is
203 * requesting it to be enabled.
204 */
205static void hsw_power_well_post_enable(struct intel_display *display,
206 u8 irq_pipe_mask, bool has_vga)
207{
208 if (has_vga)
209 intel_vga_reset_io_mem(display);
210
211 if (irq_pipe_mask)
212 gen8_irq_power_well_post_enable(display, pipe_mask: irq_pipe_mask);
213}
214
215static void hsw_power_well_pre_disable(struct intel_display *display,
216 u8 irq_pipe_mask)
217{
218 if (irq_pipe_mask)
219 gen8_irq_power_well_pre_disable(display, pipe_mask: irq_pipe_mask);
220}
221
222#define ICL_AUX_PW_TO_PHY(pw_idx) \
223 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + PHY_A)
224
225#define ICL_AUX_PW_TO_CH(pw_idx) \
226 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
227
228#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
229 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
230
231static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
232{
233 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
234
235 return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
236 ICL_AUX_PW_TO_CH(pw_idx);
237}
238
239static struct intel_digital_port *
240aux_ch_to_digital_port(struct intel_display *display,
241 enum aux_ch aux_ch)
242{
243 struct intel_encoder *encoder;
244
245 for_each_intel_encoder(display->drm, encoder) {
246 struct intel_digital_port *dig_port;
247
248 /* We'll check the MST primary port */
249 if (encoder->type == INTEL_OUTPUT_DP_MST)
250 continue;
251
252 dig_port = enc_to_dig_port(encoder);
253
254 if (dig_port && dig_port->aux_ch == aux_ch)
255 return dig_port;
256 }
257
258 return NULL;
259}
260
261static enum phy icl_aux_pw_to_phy(struct intel_display *display,
262 const struct i915_power_well *power_well)
263{
264 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
265 struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
266
267 /*
268 * FIXME should we care about the (VBT defined) dig_port->aux_ch
269 * relationship or should this be purely defined by the hardware layout?
270 * Currently if the port doesn't appear in the VBT, or if it's declared
271 * as HDMI-only and routed to a combo PHY, the encoder either won't be
272 * present at all or it will not have an aux_ch assigned.
273 */
274 return dig_port ? intel_encoder_to_phy(encoder: &dig_port->base) : PHY_NONE;
275}
276
277static void hsw_wait_for_power_well_enable(struct intel_display *display,
278 struct i915_power_well *power_well,
279 bool timeout_expected)
280{
281 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
282 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
283 int timeout = power_well->desc->enable_timeout ? : 1;
284
285 /*
286 * For some power wells we're not supposed to watch the status bit for
287 * an ack, but rather just wait a fixed amount of time and then
288 * proceed. This is only used on DG2.
289 */
290 if (display->platform.dg2 && power_well->desc->fixed_enable_delay) {
291 usleep_range(min: 600, max: 1200);
292 return;
293 }
294
295 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
296 if (intel_de_wait_for_set_ms(display, reg: regs->driver,
297 HSW_PWR_WELL_CTL_STATE(pw_idx), timeout_ms: timeout)) {
298 drm_dbg_kms(display->drm, "%s power well enable timeout\n",
299 intel_power_well_name(power_well));
300
301 drm_WARN_ON(display->drm, !timeout_expected);
302
303 }
304}
305
306static u32 hsw_power_well_requesters(struct intel_display *display,
307 const struct i915_power_well_regs *regs,
308 int pw_idx)
309{
310 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
311 u32 ret;
312
313 ret = intel_de_read(display, reg: regs->bios) & req_mask ? 1 : 0;
314 ret |= intel_de_read(display, reg: regs->driver) & req_mask ? 2 : 0;
315 if (regs->kvmr.reg)
316 ret |= intel_de_read(display, reg: regs->kvmr) & req_mask ? 4 : 0;
317 ret |= intel_de_read(display, reg: regs->debug) & req_mask ? 8 : 0;
318
319 return ret;
320}
321
322static void hsw_wait_for_power_well_disable(struct intel_display *display,
323 struct i915_power_well *power_well)
324{
325 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
326 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
327 u32 reqs;
328 int ret;
329
330 /*
331 * Bspec doesn't require waiting for PWs to get disabled, but still do
332 * this for paranoia. The known cases where a PW will be forced on:
333 * - a KVMR request on any power well via the KVMR request register
334 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
335 * DEBUG request registers
336 * Skip the wait in case any of the request bits are set and print a
337 * diagnostic message.
338 */
339 reqs = hsw_power_well_requesters(display, regs, pw_idx);
340
341 ret = intel_de_wait_for_clear_ms(display, reg: regs->driver,
342 HSW_PWR_WELL_CTL_STATE(pw_idx),
343 timeout_ms: reqs ? 0 : 1);
344 if (!ret)
345 return;
346
347 /* Refresh requesters in case they popped up during the wait. */
348 if (!reqs)
349 reqs = hsw_power_well_requesters(display, regs, pw_idx);
350
351 drm_dbg_kms(display->drm,
352 "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
353 intel_power_well_name(power_well),
354 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
355}
356
/* Wait for the fuse distribution status of the given power gate. */
static void gen9_wait_for_power_well_fuses(struct intel_display *display,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(display->drm,
		    intel_de_wait_for_set_ms(display, SKL_FUSE_STATUS,
					     SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
365
366static void hsw_power_well_enable(struct intel_display *display,
367 struct i915_power_well *power_well)
368{
369 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
370 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
371
372 if (power_well->desc->has_fuses) {
373 enum skl_power_gate pg;
374
375 pg = pw_idx_to_pg(display, pw_idx);
376
377 /* Wa_16013190616:adlp */
378 if (display->platform.alderlake_p && pg == SKL_PG1)
379 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, clear: 0, DISABLE_FLR_SRC);
380
381 /*
382 * For PW1 we have to wait both for the PW0/PG0 fuse state
383 * before enabling the power well and PW1/PG1's own fuse
384 * state after the enabling. For all other power wells with
385 * fuses we only have to wait for that PW/PG's fuse state
386 * after the enabling.
387 */
388 if (pg == SKL_PG1)
389 gen9_wait_for_power_well_fuses(display, pg: SKL_PG0);
390 }
391
392 intel_de_rmw(display, reg: regs->driver, clear: 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
393
394 hsw_wait_for_power_well_enable(display, power_well, timeout_expected: false);
395
396 if (power_well->desc->has_fuses) {
397 enum skl_power_gate pg;
398
399 pg = pw_idx_to_pg(display, pw_idx);
400
401 gen9_wait_for_power_well_fuses(display, pg);
402 }
403
404 hsw_power_well_post_enable(display,
405 irq_pipe_mask: power_well->desc->irq_pipe_mask,
406 has_vga: power_well->desc->has_vga);
407}
408
409static void hsw_power_well_disable(struct intel_display *display,
410 struct i915_power_well *power_well)
411{
412 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
413 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
414
415 hsw_power_well_pre_disable(display,
416 irq_pipe_mask: power_well->desc->irq_pipe_mask);
417
418 intel_de_rmw(display, reg: regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), set: 0);
419 hsw_wait_for_power_well_disable(display, power_well);
420}
421
422static bool intel_aux_ch_is_edp(struct intel_display *display, enum aux_ch aux_ch)
423{
424 struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
425
426 return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP;
427}
428
429static void
430icl_combo_phy_aux_power_well_enable(struct intel_display *display,
431 struct i915_power_well *power_well)
432{
433 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
434 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
435
436 drm_WARN_ON(display->drm, !display->platform.icelake);
437
438 intel_de_rmw(display, reg: regs->driver, clear: 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
439
440 /*
441 * FIXME not sure if we should derive the PHY from the pw_idx, or
442 * from the VBT defined AUX_CH->DDI->PHY mapping.
443 */
444 intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
445 clear: 0, ICL_LANE_ENABLE_AUX);
446
447 hsw_wait_for_power_well_enable(display, power_well, timeout_expected: false);
448
449 /* Display WA #1178: icl */
450 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
451 !intel_aux_ch_is_edp(display, ICL_AUX_PW_TO_CH(pw_idx)))
452 intel_de_rmw(display, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
453 clear: 0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI);
454}
455
456static void
457icl_combo_phy_aux_power_well_disable(struct intel_display *display,
458 struct i915_power_well *power_well)
459{
460 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
461 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
462
463 drm_WARN_ON(display->drm, !display->platform.icelake);
464
465 /*
466 * FIXME not sure if we should derive the PHY from the pw_idx, or
467 * from the VBT defined AUX_CH->DDI->PHY mapping.
468 */
469 intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
470 ICL_LANE_ENABLE_AUX, set: 0);
471
472 intel_de_rmw(display, reg: regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), set: 0);
473
474 hsw_wait_for_power_well_disable(display, power_well);
475}
476
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

/*
 * Debug check that the TC port reference is held while its AUX power well
 * is toggled. On display ver 11 a legacy TC port's AUX power well is also
 * used during the TC-cold exit sequence without a port reference, so that
 * case is exempted (see intel_tc_cold_requires_aux_pw()).
 */
static void icl_tc_port_assert_ref_held(struct intel_display *display,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	if (drm_WARN_ON(display->drm, !dig_port))
		return;

	if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		return;

	drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
}

#else

/* No-op stub when runtime PM debugging is disabled. */
static void icl_tc_port_assert_ref_held(struct intel_display *display,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif
501
502#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
503
504static void icl_tc_cold_exit(struct intel_display *display)
505{
506 int ret, tries = 0;
507
508 while (1) {
509 ret = intel_pcode_write(display->drm, ICL_PCODE_EXIT_TCCOLD, 0);
510 if (ret != -EAGAIN || ++tries == 3)
511 break;
512 msleep(msecs: 1);
513 }
514
515 /* Spec states that TC cold exit can take up to 1ms to complete */
516 if (!ret)
517 msleep(msecs: 1);
518
519 /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */
520 drm_dbg_kms(display->drm, "TC cold block %s\n", ret ? "failed" :
521 "succeeded");
522}
523
524static void
525icl_tc_phy_aux_power_well_enable(struct intel_display *display,
526 struct i915_power_well *power_well)
527{
528 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
529 struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
530 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
531 bool is_tbt = power_well->desc->is_tc_tbt;
532 bool timeout_expected;
533 u32 val;
534 int ret;
535
536 icl_tc_port_assert_ref_held(display, power_well, dig_port);
537
538 intel_de_rmw(display, DP_AUX_CH_CTL(aux_ch),
539 DP_AUX_CH_CTL_TBT_IO, set: is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);
540
541 intel_de_rmw(display, reg: regs->driver,
542 clear: 0,
543 HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
544
545 /*
546 * An AUX timeout is expected if the TBT DP tunnel is down,
547 * or need to enable AUX on a legacy TypeC port as part of the TC-cold
548 * exit sequence.
549 */
550 timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
551 if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
552 icl_tc_cold_exit(display);
553
554 hsw_wait_for_power_well_enable(display, power_well, timeout_expected);
555
556 if (DISPLAY_VER(display) >= 12 && !is_tbt) {
557 enum tc_port tc_port;
558
559 tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
560
561 ret = poll_timeout_us(val = intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)),
562 val & DKL_CMN_UC_DW27_UC_HEALTH,
563 100, 1000, false);
564 if (ret)
565 drm_warn(display->drm, "Timeout waiting TC uC health\n");
566 }
567}
568
569static void
570icl_aux_power_well_enable(struct intel_display *display,
571 struct i915_power_well *power_well)
572{
573 enum phy phy = icl_aux_pw_to_phy(display, power_well);
574
575 if (intel_phy_is_tc(display, phy))
576 return icl_tc_phy_aux_power_well_enable(display, power_well);
577 else if (display->platform.icelake)
578 return icl_combo_phy_aux_power_well_enable(display,
579 power_well);
580 else
581 return hsw_power_well_enable(display, power_well);
582}
583
584static void
585icl_aux_power_well_disable(struct intel_display *display,
586 struct i915_power_well *power_well)
587{
588 enum phy phy = icl_aux_pw_to_phy(display, power_well);
589
590 if (intel_phy_is_tc(display, phy))
591 return hsw_power_well_disable(display, power_well);
592 else if (display->platform.icelake)
593 return icl_combo_phy_aux_power_well_disable(display,
594 power_well);
595 else
596 return hsw_power_well_disable(display, power_well);
597}
598
599/*
600 * We should only use the power well if we explicitly asked the hardware to
601 * enable it, so check if it's enabled and also check if we've requested it to
602 * be enabled.
603 */
604static bool hsw_power_well_enabled(struct intel_display *display,
605 struct i915_power_well *power_well)
606{
607 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
608 enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
609 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
610 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
611 HSW_PWR_WELL_CTL_STATE(pw_idx);
612 u32 val;
613
614 val = intel_de_read(display, reg: regs->driver);
615
616 /*
617 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
618 * and the MISC_IO PW will be not restored, so check instead for the
619 * BIOS's own request bits, which are forced-on for these power wells
620 * when exiting DC5/6.
621 */
622 if (DISPLAY_VER(display) == 9 && !display->platform.broxton &&
623 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
624 val |= intel_de_read(display, reg: regs->bios);
625
626 return (val & mask) == mask;
627}
628
629static void assert_can_enable_dc9(struct intel_display *display)
630{
631 struct drm_i915_private *dev_priv = to_i915(dev: display->drm);
632
633 drm_WARN_ONCE(display->drm,
634 (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC9),
635 "DC9 already programmed to be enabled.\n");
636 drm_WARN_ONCE(display->drm,
637 intel_de_read(display, DC_STATE_EN) &
638 DC_STATE_EN_UPTO_DC5,
639 "DC5 still not disabled to enable DC9.\n");
640 drm_WARN_ONCE(display->drm,
641 intel_de_read(display, HSW_PWR_WELL_CTL2) &
642 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
643 "Power well 2 on.\n");
644 drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv),
645 "Interrupts not disabled yet.\n");
646
647 /*
648 * TODO: check for the following to verify the conditions to enter DC9
649 * state are satisfied:
650 * 1] Check relevant display engine registers to verify if mode set
651 * disable sequence was followed.
652 * 2] Check if display uninitialize sequence is initialized.
653 */
654}
655
656static void assert_can_disable_dc9(struct intel_display *display)
657{
658 struct drm_i915_private *dev_priv = to_i915(dev: display->drm);
659
660 drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv),
661 "Interrupts not disabled yet.\n");
662 drm_WARN_ONCE(display->drm,
663 intel_de_read(display, DC_STATE_EN) &
664 DC_STATE_EN_UPTO_DC5,
665 "DC5 still not disabled.\n");
666
667 /*
668 * TODO: check for the following to verify DC9 state was indeed
669 * entered before programming to disable it:
670 * 1] Check relevant display engine registers to verify if mode
671 * set disable sequence was followed.
672 * 2] Check if display uninitialize sequence is initialized.
673 */
674}
675
676static void gen9_write_dc_state(struct intel_display *display,
677 u32 state)
678{
679 int rewrites = 0;
680 int rereads = 0;
681 u32 v;
682
683 intel_de_write(display, DC_STATE_EN, val: state);
684
685 /* It has been observed that disabling the dc6 state sometimes
686 * doesn't stick and dmc keeps returning old value. Make sure
687 * the write really sticks enough times and also force rewrite until
688 * we are confident that state is exactly what we want.
689 */
690 do {
691 v = intel_de_read(display, DC_STATE_EN);
692
693 if (v != state) {
694 intel_de_write(display, DC_STATE_EN, val: state);
695 rewrites++;
696 rereads = 0;
697 } else if (rereads++ > 5) {
698 break;
699 }
700
701 } while (rewrites < 100);
702
703 if (v != state)
704 drm_err(display->drm,
705 "Writing dc state to 0x%x failed, now 0x%x\n",
706 state, v);
707
708 /* Most of the times we need one retry, avoid spam */
709 if (rewrites > 1)
710 drm_dbg_kms(display->drm,
711 "Rewrote dc state to 0x%x %d times\n",
712 state, rewrites);
713}
714
715static u32 gen9_dc_mask(struct intel_display *display)
716{
717 u32 mask;
718
719 mask = DC_STATE_EN_UPTO_DC5;
720
721 if (DISPLAY_VER(display) >= 12)
722 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
723 | DC_STATE_EN_DC9;
724 else if (DISPLAY_VER(display) == 11)
725 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
726 else if (display->platform.geminilake || display->platform.broxton)
727 mask |= DC_STATE_EN_DC9;
728 else
729 mask |= DC_STATE_EN_UPTO_DC6;
730
731 return mask;
732}
733
/* Reset the SW DC state tracking to the DC states currently enabled in HW. */
void gen9_sanitize_dc_state(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	u32 val;

	if (!HAS_DISPLAY(display))
		return;

	val = intel_de_read(display, DC_STATE_EN) & gen9_dc_mask(display);

	drm_dbg_kms(display->drm,
		    "Resetting DC state tracking from %02x to %02x\n",
		    power_domains->dc_state, val);
	power_domains->dc_state = val;
}
749
750/**
751 * gen9_set_dc_state - set target display C power state
752 * @display: display instance
753 * @state: target DC power state
754 * - DC_STATE_DISABLE
755 * - DC_STATE_EN_UPTO_DC5
756 * - DC_STATE_EN_UPTO_DC6
757 * - DC_STATE_EN_DC9
758 *
759 * Signal to DMC firmware/HW the target DC power state passed in @state.
760 * DMC/HW can turn off individual display clocks and power rails when entering
761 * a deeper DC power state (higher in number) and turns these back when exiting
762 * that state to a shallower power state (lower in number). The HW will decide
763 * when to actually enter a given state on an on-demand basis, for instance
764 * depending on the active state of display pipes. The state of display
765 * registers backed by affected power rails are saved/restored as needed.
766 *
767 * Based on the above enabling a deeper DC power state is asynchronous wrt.
768 * enabling it. Disabling a deeper power state is synchronous: for instance
769 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
770 * back on and register state is restored. This is guaranteed by the MMIO write
771 * to DC_STATE_EN blocking until the state is restored.
772 */
773void gen9_set_dc_state(struct intel_display *display, u32 state)
774{
775 struct i915_power_domains *power_domains = &display->power.domains;
776 bool dc6_was_enabled, enable_dc6;
777 u32 mask;
778 u32 val;
779
780 if (!HAS_DISPLAY(display))
781 return;
782
783 if (drm_WARN_ON_ONCE(display->drm,
784 state & ~power_domains->allowed_dc_mask))
785 state &= power_domains->allowed_dc_mask;
786
787 if (!power_domains->initializing)
788 intel_psr_notify_dc5_dc6(display);
789
790 val = intel_de_read(display, DC_STATE_EN);
791 mask = gen9_dc_mask(display);
792 drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n",
793 val & mask, state);
794
795 /* Check if DMC is ignoring our DC state requests */
796 if ((val & mask) != power_domains->dc_state)
797 drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n",
798 power_domains->dc_state, val & mask);
799
800 enable_dc6 = state & DC_STATE_EN_UPTO_DC6;
801 dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6;
802 if (!dc6_was_enabled && enable_dc6)
803 intel_dmc_update_dc6_allowed_count(display, start_tracking: true);
804
805 val &= ~mask;
806 val |= state;
807
808 gen9_write_dc_state(display, state: val);
809
810 if (!enable_dc6 && dc6_was_enabled)
811 intel_dmc_update_dc6_allowed_count(display, start_tracking: false);
812
813 power_domains->dc_state = val & mask;
814}
815
/* Set DC3CO as the target DC state. */
static void tgl_enable_dc3co(struct intel_display *display)
{
	drm_dbg_kms(display->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(display, DC_STATE_EN_DC3CO);
}
821
822static void tgl_disable_dc3co(struct intel_display *display)
823{
824 drm_dbg_kms(display->drm, "Disabling DC3CO\n");
825 intel_de_rmw(display, DC_STATE_EN, DC_STATE_DC3CO_STATUS, set: 0);
826 gen9_set_dc_state(display, DC_STATE_DISABLE);
827 /*
828 * Delay of 200us DC3CO Exit time B.Spec 49196
829 */
830 usleep_range(min: 200, max: 210);
831}
832
833static void assert_can_enable_dc5(struct intel_display *display)
834{
835 enum i915_power_well_id high_pg;
836
837 /* Power wells at this level and above must be disabled for DC5 entry */
838 if (DISPLAY_VER(display) == 12)
839 high_pg = ICL_DISP_PW_3;
840 else
841 high_pg = SKL_DISP_PW_2;
842
843 drm_WARN_ONCE(display->drm,
844 intel_display_power_well_is_enabled(display, high_pg),
845 "Power wells above platform's DC5 limit still enabled.\n");
846
847 drm_WARN_ONCE(display->drm,
848 (intel_de_read(display, DC_STATE_EN) &
849 DC_STATE_EN_UPTO_DC5),
850 "DC5 already programmed to be enabled.\n");
851
852 assert_display_rpm_held(display);
853
854 assert_main_dmc_loaded(display);
855}
856
857void gen9_enable_dc5(struct intel_display *display)
858{
859 assert_can_enable_dc5(display);
860
861 drm_dbg_kms(display->drm, "Enabling DC5\n");
862
863 /* Wa Display #1183: skl,kbl,cfl */
864 if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
865 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
866 clear: 0, SKL_SELECT_ALTERNATE_DC_EXIT);
867
868 intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC5);
869
870 gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC5);
871}
872
/* Assert the preconditions for entering DC6: utility pin not in PWM mode,
 * DC6 not already enabled, main DMC firmware loaded. */
static void assert_can_enable_dc6(struct intel_display *display)
{
	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, UTIL_PIN_CTL) &
		       (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) ==
		      (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
		      "Utility pin enabled in PWM mode\n");
	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_main_dmc_loaded(display);
}
887
888void skl_enable_dc6(struct intel_display *display)
889{
890 assert_can_enable_dc6(display);
891
892 drm_dbg_kms(display->drm, "Enabling DC6\n");
893
894 /* Wa Display #1183: skl,kbl,cfl */
895 if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
896 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
897 clear: 0, SKL_SELECT_ALTERNATE_DC_EXIT);
898
899 intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC6);
900
901 gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC6);
902}
903
/* Program DC9 as the target DC state, resetting the PPS first on BXT/GLK. */
void bxt_enable_dc9(struct intel_display *display)
{
	assert_can_enable_dc9(display);

	drm_dbg_kms(display->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is needed on BXT/GLK, because the PPS registers
	 * aren't always on, unlike with South Display Engine on PCH.
	 */
	if (display->platform.broxton || display->platform.geminilake)
		bxt_pps_reset_all(display);
	gen9_set_dc_state(display, DC_STATE_EN_DC9);
}
917
/* Disable DC9 and re-apply the PPS register unlock workaround. */
void bxt_disable_dc9(struct intel_display *display)
{
	assert_can_disable_dc9(display);

	drm_dbg_kms(display->drm, "Disabling DC9\n");

	gen9_set_dc_state(display, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(display);
}
928
929static void hsw_power_well_sync_hw(struct intel_display *display,
930 struct i915_power_well *power_well)
931{
932 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
933 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
934 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
935 u32 bios_req = intel_de_read(display, reg: regs->bios);
936
937 /* Take over the request bit if set by BIOS. */
938 if (bios_req & mask) {
939 u32 drv_req = intel_de_read(display, reg: regs->driver);
940
941 if (!(drv_req & mask))
942 intel_de_write(display, reg: regs->driver, val: drv_req | mask);
943 intel_de_write(display, reg: regs->bios, val: bios_req & ~mask);
944 }
945}
946
947static void bxt_dpio_cmn_power_well_enable(struct intel_display *display,
948 struct i915_power_well *power_well)
949{
950 bxt_dpio_phy_init(display, phy: i915_power_well_instance(power_well)->bxt.phy);
951}
952
953static void bxt_dpio_cmn_power_well_disable(struct intel_display *display,
954 struct i915_power_well *power_well)
955{
956 bxt_dpio_phy_uninit(display, phy: i915_power_well_instance(power_well)->bxt.phy);
957}
958
959static bool bxt_dpio_cmn_power_well_enabled(struct intel_display *display,
960 struct i915_power_well *power_well)
961{
962 return bxt_dpio_phy_is_enabled(display, phy: i915_power_well_instance(power_well)->bxt.phy);
963}
964
/*
 * Cross-check the HW state of every DPIO PHY whose power well is
 * currently referenced by the driver. PHY A and BC exist on all
 * BXT-class platforms; PHY C only on Geminilake.
 */
static void bxt_verify_dpio_phy_power_wells(struct intel_display *display)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(display, power_well_id: BXT_DISP_PW_DPIO_CMN_A);
	if (intel_power_well_refcount(power_well) > 0)
		bxt_dpio_phy_verify_state(display, phy: i915_power_well_instance(power_well)->bxt.phy);

	power_well = lookup_power_well(display, power_well_id: VLV_DISP_PW_DPIO_CMN_BC);
	if (intel_power_well_refcount(power_well) > 0)
		bxt_dpio_phy_verify_state(display, phy: i915_power_well_instance(power_well)->bxt.phy);

	/* GLK has a third combo PHY (port C) to verify. */
	if (display->platform.geminilake) {
		power_well = lookup_power_well(display,
					       power_well_id: GLK_DISP_PW_DPIO_CMN_C);
		if (intel_power_well_refcount(power_well) > 0)
			bxt_dpio_phy_verify_state(display,
						  phy: i915_power_well_instance(power_well)->bxt.phy);
	}
}
985
986static bool gen9_dc_off_power_well_enabled(struct intel_display *display,
987 struct i915_power_well *power_well)
988{
989 return ((intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
990 (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
991}
992
993static void gen9_assert_dbuf_enabled(struct intel_display *display)
994{
995 u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(display);
996 u8 enabled_dbuf_slices = display->dbuf.enabled_slices;
997
998 drm_WARN(display->drm,
999 hw_enabled_dbuf_slices != enabled_dbuf_slices,
1000 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
1001 hw_enabled_dbuf_slices,
1002 enabled_dbuf_slices);
1003}
1004
/*
 * gen9_disable_dc_states - bring the hardware out of any DC power state
 * @display: display device
 *
 * Clears the committed DC state and re-verifies/restores state that DC
 * transitions may have disturbed (CDCLK, DBuf, PHYs). DC3CO has its own
 * lightweight exit path and skips all of the verification below.
 */
void gen9_disable_dc_states(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct intel_cdclk_config cdclk_config = {};
	u32 old_state = power_domains->dc_state;

	if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(display);
		return;
	}

	if (HAS_DISPLAY(display)) {
		/* Hold the DMC wakelock (no reg tracking) across the state write. */
		intel_dmc_wl_get_noreg(display);
		gen9_set_dc_state(display, DC_STATE_DISABLE);
		intel_dmc_wl_put_noreg(display);
	} else {
		/* No display: nothing further to verify/restore. */
		gen9_set_dc_state(display, DC_STATE_DISABLE);
		return;
	}

	if (old_state == DC_STATE_EN_UPTO_DC5 ||
	    old_state == DC_STATE_EN_UPTO_DC6)
		intel_dmc_wl_disable(display);

	intel_cdclk_get_cdclk(display, cdclk_config: &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(display->drm,
		    intel_cdclk_clock_changed(&display->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(display);

	if (display->platform.geminilake || display->platform.broxton)
		bxt_verify_dpio_phy_power_wells(display);

	if (DISPLAY_VER(display) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(display);
}
1048
/* Enabling the "DC off" well means forbidding DC states altogether. */
static void gen9_dc_off_power_well_enable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(display);
}
1054
/*
 * Disabling the "DC off" well re-allows DC states: enable whichever
 * target DC state is currently selected. Requires DMC firmware to be
 * loaded, since DMC performs the actual DC transitions.
 */
static void gen9_dc_off_power_well_disable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	/* Without DMC firmware no DC state can be entered. */
	if (!intel_dmc_has_payload(display))
		return;

	switch (power_domains->target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(display);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(display);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(display);
		break;
	}
}
1075
/* No HW state to synchronize for these wells. */
static void i9xx_power_well_sync_hw_noop(struct intel_display *display,
					 struct i915_power_well *power_well)
{
}
1080
/* Always-on wells have no enable/disable knob: nothing to do. */
static void i9xx_always_on_power_well_noop(struct intel_display *display,
					   struct i915_power_well *power_well)
{
}
1085
/* Always-on wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct intel_display *display,
					      struct i915_power_well *power_well)
{
	return true;
}
1091
/*
 * On i830 both pipes must be kept running; enable whichever of pipe
 * A/B is not already enabled.
 */
static void i830_pipes_power_well_enable(struct intel_display *display,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE) == 0)
		i830_enable_pipe(display, pipe: PIPE_A);
	if ((intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE) == 0)
		i830_enable_pipe(display, pipe: PIPE_B);
}
1100
/* Disable both pipes; B first, then A (reverse of the enable order). */
static void i830_pipes_power_well_disable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(display, pipe: PIPE_B);
	i830_disable_pipe(display, pipe: PIPE_A);
}
1107
/* The well counts as enabled only when both pipes are enabled. */
static bool i830_pipes_power_well_enabled(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	return intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE &&
		intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE;
}
1114
/* Force the pipes to match the current software refcount of the well. */
static void i830_pipes_power_well_sync_hw(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	if (intel_power_well_refcount(power_well) == 0)
		i830_pipes_power_well_disable(display, power_well);
	else
		i830_pipes_power_well_enable(display, power_well);
}
1123
/*
 * Program a VLV/CHV power well via the Punit power gate control
 * register and wait (up to 100ms, polling every 500us) for the status
 * register to reflect the requested state. Serialized by the punit
 * side-band lock.
 */
static void vlv_set_power_well(struct intel_display *display,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;
	u32 val;
	int ret;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(drm: display->drm);

	/* Skip the request if the well is already in the wanted state. */
	val = vlv_punit_read(drm: display->drm, PUNIT_REG_PWRGT_STATUS);
	if ((val & mask) == state)
		goto out;

	ctrl = vlv_punit_read(drm: display->drm, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(drm: display->drm, PUNIT_REG_PWRGT_CTRL, val: ctrl);

	ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS),
			      (val & mask) == state,
			      500, 100 * 1000, false);
	if (ret)
		drm_err(display->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL));

out:
	vlv_punit_put(drm: display->drm);
}
1161
1162static void vlv_power_well_enable(struct intel_display *display,
1163 struct i915_power_well *power_well)
1164{
1165 vlv_set_power_well(display, power_well, enable: true);
1166}
1167
1168static void vlv_power_well_disable(struct intel_display *display,
1169 struct i915_power_well *power_well)
1170{
1171 vlv_set_power_well(display, power_well, enable: false);
1172}
1173
/*
 * Read back the well state from the Punit status register and sanity
 * check it: only power-on/power-gate are ever programmed, and the
 * control register must already match the status (no transition in
 * flight).
 */
static bool vlv_power_well_enabled(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(drm: display->drm);

	state = vlv_punit_read(drm: display->drm, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(display->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(drm: display->drm, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(display->drm, ctrl != state);

	vlv_punit_put(drm: display->drm);

	return enabled;
}
1209
/*
 * Program VLV display clock gating, arbiter and rawclk frequency
 * registers to their required initial values.
 */
static void vlv_init_display_clock_gating(struct intel_display *display)
{
	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	intel_de_rmw(display, VLV_DSPCLK_GATE_D,
		     clear: ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(display, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(display, CBR1_VLV, val: 0);

	/* rawclk must be known before programming its frequency in kHz. */
	drm_WARN_ON(display->drm, DISPLAY_RUNTIME_INFO(display)->rawclk_freq == 0);
	intel_de_write(display, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq,
					 1000));
}
1233
/*
 * Re-initialize display state after the VLV/CHV display power well has
 * been powered on: DPLL ref/CRI clocks, clock gating, display IRQs,
 * and (outside of driver init) hotplug, CRT ADPA, VGA and PPS state.
 */
static void vlv_display_power_well_init(struct intel_display *display)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(display, pipe) {
		u32 val = intel_de_read(display, DPLL(display, pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(display, DPLL(display, pipe), val);
	}

	vlv_init_display_clock_gating(display);

	valleyview_enable_display_irqs(display);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (display->power.domains.initializing)
		return;

	intel_hpd_init(display);
	intel_hpd_poll_disable(display);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(display->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(encoder: &encoder->base);
	}

	intel_vga_disable(display);

	intel_pps_unlock_regs_wa(display);
}
1281
/*
 * Tear down display state before the VLV/CHV display power well is
 * powered off: disable and drain display IRQs, reset PPS state, and
 * re-enable hotplug polling (unless we're in late suspend).
 */
static void vlv_display_power_well_deinit(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(dev: display->drm);

	valleyview_disable_display_irqs(display);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(i915: dev_priv);

	vlv_pps_reset_all(display);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!display->drm->dev->power.is_suspended)
		intel_hpd_poll_enable(display);
}
1297
1298static void vlv_display_power_well_enable(struct intel_display *display,
1299 struct i915_power_well *power_well)
1300{
1301 vlv_set_power_well(display, power_well, enable: true);
1302
1303 vlv_display_power_well_init(display);
1304}
1305
1306static void vlv_display_power_well_disable(struct intel_display *display,
1307 struct i915_power_well *power_well)
1308{
1309 vlv_display_power_well_deinit(display);
1310
1311 vlv_set_power_well(display, power_well, enable: false);
1312}
1313
/*
 * Power on the VLV DPIO common lane well and then de-assert the common
 * lane reset. The reset de-assert must only happen with both PLLs
 * disabled (init / S3 resume), see the inline note below.
 */
static void vlv_dpio_cmn_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(usec: 1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(display, power_well, enable: true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_rmw(display, DPIO_CTL, clear: 0, DPIO_CMNRST);
}
1335
/*
 * Assert the DPIO common lane reset and power the well off. All PLLs
 * must already be disabled at this point (asserted below).
 */
static void vlv_dpio_cmn_power_well_disable(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(display, pipe)
		assert_pll_disabled(display, pipe);

	/* Assert common reset */
	intel_de_rmw(display, DPIO_CTL, DPIO_CMNRST, set: 0);

	vlv_set_power_well(display, power_well, enable: false);
}
1349
1350#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1351
/*
 * Verify that the CHV PHY status register agrees with the expected
 * state derived from the power well refcounts and the cached
 * chv_phy_control value. Mismatches indicate something else poked the
 * PHY power controls behind the driver's back.
 */
static void assert_chv_phy_status(struct intel_display *display)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(display, power_well_id: VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(display, power_well_id: CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = display->power.chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;
	u32 val;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!display->power.chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!display->power.chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	/* PHY0 (ports B/C): derive expected per-lane status bits. */
	if (intel_power_well_is_enabled(display, power_well: cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(display, DPLL(display, PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	/* PHY1 (port D): single channel, same derivation. */
	if (intel_power_well_is_enabled(display, power_well: cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_ms(display, DISPLAY_PHY_STATUS,
			     mask: phy_status_mask, value: phy_status, timeout_ms: 10, out_value: &val))
		drm_err(display->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			val & phy_status_mask, phy_status, display->power.chv_phy_control);
}
1455
1456#undef BITS_SET
1457
/*
 * Power on one of the two CHV DPIO common lane wells (PHY0 = ports
 * B/C, PHY1 = port D), wait for the power good indication, enable
 * dynamic power down in the PHY and de-assert the common lane reset.
 */
static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;
	u32 tmp;

	drm_WARN_ON_ONCE(display->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	if (id == VLV_DISP_PW_DPIO_CMN_BC)
		phy = DPIO_PHY0;
	else
		phy = DPIO_PHY1;

	/* since ref/cri clock was enabled */
	udelay(usec: 1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(display, power_well, enable: true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set_ms(display, DISPLAY_PHY_STATUS,
				     PHY_POWERGOOD(phy), timeout_ms: 1))
		drm_err(display->drm, "Display PHY %d is not power up\n",
			phy);

	vlv_dpio_get(drm: display->drm);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(drm: display->drm, phy, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(drm: display->drm, phy, CHV_CMN_DW28, val: tmp);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		/* PHY0 has a second channel: enable its dynamic power down too. */
		tmp = vlv_dpio_read(drm: display->drm, phy, CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(drm: display->drm, phy, CHV_CMN_DW6_CH1, val: tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(drm: display->drm, phy, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(drm: display->drm, phy, CHV_CMN_DW30, val: tmp);
	}

	vlv_dpio_put(drm: display->drm);

	/* Record and program the lane reset de-assert for this PHY. */
	display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       val: display->power.chv_phy_control);

	drm_dbg_kms(display->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, display->power.chv_phy_control);

	assert_chv_phy_status(display);
}
1519
/*
 * Power off a CHV DPIO common lane well: assert the PHY's common lane
 * reset and gate the well. All PLLs fed by the PHY must be disabled
 * (asserted per pipe below). After a full reset the PHY state asserts
 * become trustworthy and are enabled.
 */
static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(display->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(display, pipe: PIPE_A);
		assert_pll_disabled(display, pipe: PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(display, pipe: PIPE_C);
	}

	display->power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       val: display->power.chv_phy_control);

	vlv_set_power_well(display, power_well, enable: false);

	drm_dbg_kms(display->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, display->power.chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	display->power.chv_phy_assert[phy] = true;

	assert_chv_phy_status(display);
}
1554
/*
 * Verify the per-lane power down status in the PHY's common lane
 * registers matches what the override setting and lane mask imply.
 * @mask: lanes being enabled via the override (0x0..0xf).
 */
static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!display->power.chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = CHV_CMN_DW0_CH0;
	else
		reg = CHV_CMN_DW6_CH1;

	vlv_dpio_get(drm: display->drm);
	val = vlv_dpio_read(drm: display->drm, phy, reg);
	vlv_dpio_put(drm: display->drm);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* Some lanes enabled: only "any lane powered down" expected. */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH0 |
				       DPIO_ALLDL_POWERDOWN_CH0, val);
	else
		actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 |
				       DPIO_ALLDL_POWERDOWN_CH1, val);

	drm_WARN(display->drm, actual != expected,
		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
		 !!(actual & DPIO_ALLDL_POWERDOWN),
		 !!(actual & DPIO_ANYDL_POWERDOWN),
		 !!(expected & DPIO_ALLDL_POWERDOWN),
		 !!(expected & DPIO_ANYDL_POWERDOWN),
		 reg, val);
}
1618
/*
 * chv_phy_powergate_ch - toggle the power down override for a PHY channel
 * @display: display device
 * @phy: target PHY
 * @ch: target channel
 * @override: whether to enable the override
 *
 * Updates the cached chv_phy_control value and writes it out, holding
 * the power domains lock. Returns the previous override state so the
 * caller can restore it later.
 */
bool chv_phy_powergate_ch(struct intel_display *display, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = display->power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	/* Nothing to do if the override is already in the wanted state. */
	if (override == was_override)
		goto out;

	if (override)
		display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       val: display->power.chv_phy_control);

	drm_dbg_kms(display->drm,
		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		    phy, ch, display->power.chv_phy_control);

	assert_chv_phy_status(display);

out:
	mutex_unlock(lock: &power_domains->lock);

	return was_override;
}
1651
/*
 * chv_phy_powergate_lanes - program the per-lane power down override
 * @encoder: encoder whose digital port selects the PHY/channel
 * @override: whether the override is enabled
 * @mask: lanes to keep powered via the override
 *
 * Updates the cached chv_phy_control lane mask and override enable for
 * the encoder's PHY channel, writes the register and cross-checks the
 * resulting PHY state. Holds the power domains lock throughout.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct intel_display *display = to_intel_display(encoder);
	struct i915_power_domains *power_domains = &display->power.domains;
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port: enc_to_dig_port(encoder));
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port: enc_to_dig_port(encoder));

	mutex_lock(&power_domains->lock);

	/* Replace the old lane mask with the new one. */
	display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       val: display->power.chv_phy_control);

	drm_dbg_kms(display->drm,
		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		    phy, ch, mask, display->power.chv_phy_control);

	assert_chv_phy_status(display);

	assert_chv_phy_powergate(display, phy, ch, override, mask);

	mutex_unlock(lock: &power_domains->lock);
}
1683
/*
 * Read back the pipe A power well state from the Punit DSPSSPM
 * register, sanity-checking that only power-on/power-gate is ever seen
 * and that no transition is in flight.
 */
static bool chv_pipe_power_well_enabled(struct intel_display *display,
					struct i915_power_well *power_well)
{
	/* Only the pipe A well exists on CHV. */
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(drm: display->drm);

	state = vlv_punit_read(drm: display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(display->drm, state != DP_SSS_PWR_ON(pipe) &&
		    state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(drm: display->drm, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	drm_WARN_ON(display->drm, ctrl << 16 != state);

	vlv_punit_put(drm: display->drm);

	return enabled;
}
1713
1714static void chv_set_pipe_power_well(struct intel_display *display,
1715 struct i915_power_well *power_well,
1716 bool enable)
1717{
1718 enum pipe pipe = PIPE_A;
1719 u32 state;
1720 u32 ctrl;
1721 int ret;
1722
1723 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1724
1725 vlv_punit_get(drm: display->drm);
1726
1727 ctrl = vlv_punit_read(drm: display->drm, PUNIT_REG_DSPSSPM);
1728 if ((ctrl & DP_SSS_MASK(pipe)) == state)
1729 goto out;
1730
1731 ctrl &= ~DP_SSC_MASK(pipe);
1732 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1733 vlv_punit_write(drm: display->drm, PUNIT_REG_DSPSSPM, val: ctrl);
1734
1735 ret = poll_timeout_us(ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
1736 (ctrl & DP_SSS_MASK(pipe)) == state,
1737 500, 100 * 1000, false);
1738 if (ret)
1739 drm_err(display->drm,
1740 "timeout setting power well state %08x (%08x)\n",
1741 state,
1742 vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM));
1743
1744#undef COND
1745
1746out:
1747 vlv_punit_put(drm: display->drm);
1748}
1749
1750static void chv_pipe_power_well_sync_hw(struct intel_display *display,
1751 struct i915_power_well *power_well)
1752{
1753 intel_de_write(display, DISPLAY_PHY_CONTROL,
1754 val: display->power.chv_phy_control);
1755}
1756
1757static void chv_pipe_power_well_enable(struct intel_display *display,
1758 struct i915_power_well *power_well)
1759{
1760 chv_set_pipe_power_well(display, power_well, enable: true);
1761
1762 vlv_display_power_well_init(display);
1763}
1764
1765static void chv_pipe_power_well_disable(struct intel_display *display,
1766 struct i915_power_well *power_well)
1767{
1768 vlv_display_power_well_deinit(display);
1769
1770 chv_set_pipe_power_well(display, power_well, enable: false);
1771}
1772
/*
 * Ask pcode to block (or unblock) TC-cold entry, retrying up to three
 * times with a 1ms pause between attempts. A block request can also
 * fail "successfully" via the EXIT_FAILED data bit, which is mapped to
 * -EIO and retried.
 */
static void
tgl_tc_cold_request(struct intel_display *display, bool block)
{
	u8 tries = 0;
	int ret;

	while (1) {
		u32 low_val;
		u32 high_val = 0;

		if (block)
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
		else
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;

		/*
		 * Spec states that we should timeout the request after 200us
		 * but the function below will timeout after 500us
		 */
		ret = intel_pcode_read(drm: display->drm, TGL_PCODE_TCCOLD, val: &low_val, val1: &high_val);
		if (ret == 0) {
			/* pcode accepted the mailbox but may still report exit failure. */
			if (block &&
			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
				ret = -EIO;
			else
				break;
		}

		if (++tries == 3)
			break;

		msleep(msecs: 1);
	}

	if (ret)
		drm_err(display->drm, "TC cold %sblock failed\n", block ? "" : "un");
	else
		drm_dbg_kms(display->drm, "TC cold %sblock succeeded\n",
			    block ? "" : "un");
}
1813
1814static void
1815tgl_tc_cold_off_power_well_enable(struct intel_display *display,
1816 struct i915_power_well *power_well)
1817{
1818 tgl_tc_cold_request(display, block: true);
1819}
1820
1821static void
1822tgl_tc_cold_off_power_well_disable(struct intel_display *display,
1823 struct i915_power_well *power_well)
1824{
1825 tgl_tc_cold_request(display, block: false);
1826}
1827
/* Re-issue the pcode request that matches the current SW refcount. */
static void
tgl_tc_cold_off_power_well_sync_hw(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	if (intel_power_well_refcount(power_well) == 0)
		tgl_tc_cold_off_power_well_disable(display, power_well);
	else
		tgl_tc_cold_off_power_well_enable(display, power_well);
}
1837
static bool
tgl_tc_cold_off_power_well_is_enabled(struct intel_display *display,
				      struct i915_power_well *power_well)
{
	/*
	 * Not a correct implementation: there is no way to read the
	 * block state back from pcode, so report the SW refcount to
	 * avoid state mismatch errors.
	 */
	return intel_power_well_refcount(power_well);
}
1848
/*
 * Power up an XELPDP AUX channel by setting its POWER_REQUEST bit.
 * On display ver >= 35 the POWER_STATUS bit can be polled; on older
 * hardware a fixed delay must be used instead (see inline note).
 */
static void xelpdp_aux_power_well_enable(struct intel_display *display,
					 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
	enum phy phy = icl_aux_pw_to_phy(display, power_well);

	/* TC PHYs require the TC-cold block reference to be held. */
	if (intel_phy_is_tc(display, phy))
		icl_tc_port_assert_ref_held(display, power_well,
					    dig_port: aux_ch_to_digital_port(display, aux_ch));

	intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
		     XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
		     XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);

	/*
	 * The power status flag cannot be used to determine whether aux
	 * power wells have finished powering up. Instead we're
	 * expected to just wait a fixed 600us after raising the request
	 * bit.
	 */
	if (DISPLAY_VER(display) >= 35) {
		if (intel_de_wait_for_set_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
					     XELPDP_DP_AUX_CH_CTL_POWER_STATUS, timeout_ms: 2))
			drm_warn(display->drm,
				 "Timeout waiting for PHY %c AUX channel power to be up\n",
				 phy_name(phy));
	} else {
		usleep_range(min: 600, max: 1200);
	}
}
1879
/*
 * Power down an XELPDP AUX channel by clearing its POWER_REQUEST bit.
 * Display ver >= 35 polls POWER_STATUS for the power-down; older
 * hardware uses a fixed short delay.
 */
static void xelpdp_aux_power_well_disable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
	enum phy phy = icl_aux_pw_to_phy(display, power_well);

	intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
		     XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
		     set: 0);

	if (DISPLAY_VER(display) >= 35) {
		if (intel_de_wait_for_clear_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
					       XELPDP_DP_AUX_CH_CTL_POWER_STATUS, timeout_ms: 1))
			drm_warn(display->drm,
				 "Timeout waiting for PHY %c AUX channel to powerdown\n",
				 phy_name(phy));
	} else {
		usleep_range(min: 10, max: 30);
	}
}
1900
1901static bool xelpdp_aux_power_well_enabled(struct intel_display *display,
1902 struct i915_power_well *power_well)
1903{
1904 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
1905
1906 return intel_de_read(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch)) &
1907 XELPDP_DP_AUX_CH_CTL_POWER_STATUS;
1908}
1909
/*
 * Request the PICA power well on and wait up to 1ms for its status
 * bit; warn on timeout.
 */
static void xe2lpd_pica_power_well_enable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	intel_de_write(display, XE2LPD_PICA_PW_CTL,
		       XE2LPD_PICA_CTL_POWER_REQUEST);

	if (intel_de_wait_for_set_ms(display, XE2LPD_PICA_PW_CTL,
				     XE2LPD_PICA_CTL_POWER_STATUS, timeout_ms: 1)) {
		drm_dbg_kms(display->drm, "pica power well enable timeout\n");

		drm_WARN(display->drm, 1, "Power well PICA timeout when enabled");
	}
}
1923
/*
 * Clear the PICA power request and wait up to 1ms for the status bit
 * to drop; warn on timeout.
 */
static void xe2lpd_pica_power_well_disable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	intel_de_write(display, XE2LPD_PICA_PW_CTL, val: 0);

	if (intel_de_wait_for_clear_ms(display, XE2LPD_PICA_PW_CTL,
				       XE2LPD_PICA_CTL_POWER_STATUS, timeout_ms: 1)) {
		drm_dbg_kms(display->drm, "pica power well disable timeout\n");

		drm_WARN(display->drm, 1, "Power well PICA timeout when disabled");
	}
}
1936
1937static bool xe2lpd_pica_power_well_enabled(struct intel_display *display,
1938 struct i915_power_well *power_well)
1939{
1940 return intel_de_read(display, XE2LPD_PICA_PW_CTL) &
1941 XE2LPD_PICA_CTL_POWER_STATUS;
1942}
1943
/* Ops for wells with no HW control knob: enable/disable are no-ops. */
const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};
1950
/* Ops for the CHV per-pipe power wells. */
const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};
1957
/* Ops for the CHV DPIO common lane power wells (VLV-style status check). */
const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
1964
/* Ops for the i830 combined-pipes power well. */
const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};
1971
/* HSW+ power well control registers, one per request owner (see struct). */
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios	= HSW_PWR_WELL_CTL1,
	.driver	= HSW_PWR_WELL_CTL2,
	.kvmr	= HSW_PWR_WELL_CTL3,
	.debug	= HSW_PWR_WELL_CTL4,
};
1978
/* Generic HSW-style request/status power well ops. */
const struct i915_power_well_ops hsw_power_well_ops = {
	.regs = &hsw_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
1986
/* Ops for the GEN9+ "DC off" well controlling display C-state entry. */
const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};
1993
/* Ops for the BXT DPIO common PHY power wells. */
const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
2000
/* Ops for the VLV display power well. */
const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2007
/* Ops for the VLV DPIO common lane power well. */
const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2014
/* Generic VLV punit-controlled power well ops. */
const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2021
/* ICL AUX power well control registers; no KVMr register on this set. */
static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_AUX1,
	.driver	= ICL_PWR_WELL_CTL_AUX2,
	.debug	= ICL_PWR_WELL_CTL_AUX4,
};
2027
/* Ops for ICL+ AUX channel power wells (HSW-style request/status flow). */
const struct i915_power_well_ops icl_aux_power_well_ops = {
	.regs = &icl_aux_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_aux_power_well_enable,
	.disable = icl_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
2035
/* ICL DDI power well control registers; no KVMr register on this set. */
static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_DDI1,
	.driver	= ICL_PWR_WELL_CTL_DDI2,
	.debug	= ICL_PWR_WELL_CTL_DDI4,
};
2041
/* Ops for ICL+ DDI power wells, reusing the HSW request/status handlers. */
const struct i915_power_well_ops icl_ddi_power_well_ops = {
	.regs = &icl_ddi_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
2049
/* Ops for the TGL TC-cold-off fake power well. */
const struct i915_power_well_ops tgl_tc_cold_off_ops = {
	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
	.enable = tgl_tc_cold_off_power_well_enable,
	.disable = tgl_tc_cold_off_power_well_disable,
	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};
2056
/* Ops for XELPDP+ AUX channel power wells (per-AUX CTL register). */
const struct i915_power_well_ops xelpdp_aux_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = xelpdp_aux_power_well_enable,
	.disable = xelpdp_aux_power_well_disable,
	.is_enabled = xelpdp_aux_power_well_enabled,
};
2063
/* Ops for the Xe2_LPD PICA power well. */
const struct i915_power_well_ops xe2lpd_pica_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = xe2lpd_pica_power_well_enable,
	.disable = xe2lpd_pica_power_well_disable,
	.is_enabled = xe2lpd_pica_power_well_enabled,
};
2070

source code of linux/drivers/gpu/drm/i915/display/intel_display_power_well.c