1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2025 Intel Corporation
4 */
5
6#include <drm/drm_print.h>
7
8#include "i915_reg.h"
9#include "intel_cx0_phy.h"
10#include "intel_cx0_phy_regs.h"
11#include "intel_ddi.h"
12#include "intel_ddi_buf_trans.h"
13#include "intel_de.h"
14#include "intel_display.h"
15#include "intel_display_types.h"
16#include "intel_display_utils.h"
17#include "intel_dpll_mgr.h"
18#include "intel_hdmi.h"
19#include "intel_lt_phy.h"
20#include "intel_lt_phy_regs.h"
21#include "intel_panel.h"
22#include "intel_psr.h"
23#include "intel_tc.h"
24
25#define for_each_lt_phy_lane_in_mask(__lane_mask, __lane) \
26 for ((__lane) = 0; (__lane) < 2; (__lane)++) \
27 for_each_if((__lane_mask) & BIT(__lane))
28
29#define INTEL_LT_PHY_LANE0 BIT(0)
30#define INTEL_LT_PHY_LANE1 BIT(1)
31#define INTEL_LT_PHY_BOTH_LANES (INTEL_LT_PHY_LANE1 |\
32 INTEL_LT_PHY_LANE0)
33#define MODE_DP 3
34#define Q32_TO_INT(x) ((x) >> 32)
35#define Q32_TO_FRAC(x) ((x) & 0xFFFFFFFF)
36#define DCO_MIN_FREQ_MHZ 11850
37#define REF_CLK_KHZ 38400
38#define TDC_RES_MULTIPLIER 10000000ULL
39
/* One PHY VDR register write: the value to program and its register address. */
struct phy_param_t {
	u32 val;	/* register value */
	u32 addr;	/* VDR register address */
};
44
/*
 * Computed LT PHY PLL programming set: one value/address pair per PLL VDR
 * register written during PLL configuration (see set_phy_vdr_addresses()
 * and the compute_*() helpers below that fill in the values).
 */
struct lt_phy_params {
	struct phy_param_t pll_reg4;	/* PLL register 4 */
	struct phy_param_t pll_reg3;	/* PLL register 3 */
	struct phy_param_t pll_reg5;	/* PLL register 5 */
	struct phy_param_t pll_reg57;	/* PLL register 57 */
	struct phy_param_t lf;		/* loop filter */
	struct phy_param_t tdc;		/* time-to-digital converter */
	struct phy_param_t ssc;		/* spread-spectrum clocking */
	struct phy_param_t bias2;	/* bias control 2 */
	struct phy_param_t bias_trim;	/* bias trim */
	struct phy_param_t dco_med;	/* DCO medium-band calibration */
	struct phy_param_t dco_fine;	/* DCO fine-band calibration */
	struct phy_param_t ssc_inj;	/* SSC injection */
	struct phy_param_t surv_bonus;	/* survivability bonus */
};
60
/*
 * DP RBR (1.62 Gbps, port_clock 162000 kHz) PLL state.
 * Each of the 13 data rows is written to the VDR register addressed by the
 * corresponding addr_msb/addr_lsb pair.
 */
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_rbr = {
	.clock = 162000,
	.config = {
		0x83,
		0x2d,
		0x0,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x5, 0xa, 0x2a, 0x20 },
		{ 0x80, 0x0, 0x0, 0x0 },
		{ 0x4, 0x4, 0x82, 0x28 },
		{ 0xfa, 0x16, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x5, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x4b, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0a },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
114
/* DP HBR1 (2.7 Gbps, port_clock 270000 kHz) PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr1 = {
	.clock = 270000,
	.config = {
		0x8b,
		0x2d,
		0x0,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x3, 0xca, 0x34, 0xa0 },
		{ 0xe0, 0x0, 0x0, 0x0 },
		{ 0x5, 0x4, 0x81, 0xad },
		{ 0xfa, 0x11, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x7, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x43, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0d },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
168
/* DP HBR2 (5.4 Gbps, port_clock 540000 kHz) PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr2 = {
	.clock = 540000,
	.config = {
		0x93,
		0x2d,
		0x0,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x1, 0x4d, 0x34, 0xa0 },
		{ 0xe0, 0x0, 0x0, 0x0 },
		{ 0xa, 0x4, 0x81, 0xda },
		{ 0xfa, 0x11, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x7, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x43, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0d },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
222
/* DP HBR3 (8.1 Gbps, port_clock 810000 kHz) PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr3 = {
	.clock = 810000,
	.config = {
		0x9b,
		0x2d,
		0x0,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x1, 0x4a, 0x34, 0xa0 },
		{ 0xe0, 0x0, 0x0, 0x0 },
		{ 0x5, 0x4, 0x80, 0xa8 },
		{ 0xfa, 0x11, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x7, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x43, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0d },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
276
/*
 * DP UHBR10 (10 Gbps, port_clock 1000000 kHz) PLL state.
 * Note the different addr_msb bank (0x85/0x86) vs the 8b/10b rates.
 */
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr10 = {
	.clock = 1000000,
	.config = {
		0x43,
		0x2d,
		0x0,
	},
	.addr_msb = {
		0x85,
		0x85,
		0x85,
		0x85,
		0x86,
		0x86,
		0x86,
		0x86,
		0x86,
		0x86,
		0x86,
		0x86,
		0x86,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x1, 0xa, 0x20, 0x80 },
		{ 0x6a, 0xaa, 0xaa, 0xab },
		{ 0x0, 0x3, 0x4, 0x94 },
		{ 0xfa, 0x1c, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x4, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x45, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x5b, 0xe0, 0x8 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
330
/* DP UHBR13.5 (13.5 Gbps, port_clock 1350000 kHz) PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr13_5 = {
	.clock = 1350000,
	.config = {
		0xcb,
		0x2d,
		0x0,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x2, 0x9, 0x2b, 0xe0 },
		{ 0x90, 0x0, 0x0, 0x0 },
		{ 0x8, 0x4, 0x80, 0xe0 },
		{ 0xfa, 0x15, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x6, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x49, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x57, 0xe0, 0x0c },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
384
/*
 * DP UHBR20 (20 Gbps, port_clock 2000000 kHz) PLL state.
 * NOTE(review): addr and data are identical to the UHBR10 table, only
 * config[0] differs (0x53 vs 0x43) — presumably the rate select lives in
 * config; confirm against Bspec.
 */
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr20 = {
	.clock = 2000000,
	.config = {
		0x53,
		0x2d,
		0x0,
	},
	.addr_msb = {
		0x85,
		0x85,
		0x85,
		0x85,
		0x86,
		0x86,
		0x86,
		0x86,
		0x86,
		0x86,
		0x86,
		0x86,
		0x86,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x1, 0xa, 0x20, 0x80 },
		{ 0x6a, 0xaa, 0xaa, 0xab },
		{ 0x0, 0x3, 0x4, 0x94 },
		{ 0xfa, 0x1c, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x4, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x45, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x5b, 0xe0, 0x8 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
438
/* NULL-terminated list of supported DP link-rate PLL states. */
static const struct intel_lt_phy_pll_state * const xe3plpd_lt_dp_tables[] = {
	&xe3plpd_lt_dp_rbr,
	&xe3plpd_lt_dp_hbr1,
	&xe3plpd_lt_dp_hbr2,
	&xe3plpd_lt_dp_hbr3,
	&xe3plpd_lt_dp_uhbr10,
	&xe3plpd_lt_dp_uhbr13_5,
	&xe3plpd_lt_dp_uhbr20,
	NULL,
};
449
/* eDP intermediate rate R216 (2.16 Gbps, port_clock 216000 kHz) PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_16 = {
	.clock = 216000,
	.config = {
		0xa3,
		0x2d,
		0x1,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x3, 0xca, 0x2a, 0x20 },
		{ 0x80, 0x0, 0x0, 0x0 },
		{ 0x6, 0x4, 0x81, 0xbc },
		{ 0xfa, 0x16, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x5, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x4b, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0a },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
503
/* eDP intermediate rate R243 (2.43 Gbps, port_clock 243000 kHz) PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_43 = {
	.clock = 243000,
	.config = {
		0xab,
		0x2d,
		0x1,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x3, 0xca, 0x2f, 0x60 },
		{ 0xb0, 0x0, 0x0, 0x0 },
		{ 0x6, 0x4, 0x81, 0xbc },
		{ 0xfa, 0x13, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x6, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x47, 0x48, 0x0, 0x0 },
		/*
		 * NOTE(review): all-zero row here where every other DP/eDP
		 * table has { 0x27, 0x8, 0x0, 0x0 } — confirm against Bspec
		 * that this is intentional and not a transcription error.
		 */
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0c },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
557
/* eDP intermediate rate R324 (3.24 Gbps, port_clock 324000 kHz) PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_3_24 = {
	.clock = 324000,
	.config = {
		0xb3,
		0x2d,
		0x1,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x2, 0x8a, 0x2a, 0x20 },
		{ 0x80, 0x0, 0x0, 0x0 },
		{ 0x6, 0x4, 0x81, 0x28 },
		{ 0xfa, 0x16, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x5, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x4b, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0a },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
611
/* eDP intermediate rate R432 (4.32 Gbps, port_clock 432000 kHz) PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_4_32 = {
	.clock = 432000,
	.config = {
		0xbb,
		0x2d,
		0x1,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x1, 0x4d, 0x2a, 0x20 },
		{ 0x80, 0x0, 0x0, 0x0 },
		{ 0xc, 0x4, 0x81, 0xbc },
		{ 0xfa, 0x16, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x5, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x4b, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0a },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
665
/* eDP intermediate rate R675 (6.75 Gbps, port_clock 675000 kHz) PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_6_75 = {
	.clock = 675000,
	.config = {
		0xdb,
		0x2d,
		0x1,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x1, 0x4a, 0x2b, 0xe0 },
		{ 0x90, 0x0, 0x0, 0x0 },
		{ 0x6, 0x4, 0x80, 0xa8 },
		{ 0xfa, 0x15, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x6, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x49, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x57, 0xe0, 0x0c },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
719
/*
 * NULL-terminated list of eDP PLL states: the standard DP rates shared
 * with xe3plpd_lt_dp_tables plus the eDP intermediate rates, in ascending
 * clock order.
 */
static const struct intel_lt_phy_pll_state * const xe3plpd_lt_edp_tables[] = {
	&xe3plpd_lt_dp_rbr,
	&xe3plpd_lt_edp_2_16,
	&xe3plpd_lt_edp_2_43,
	&xe3plpd_lt_dp_hbr1,
	&xe3plpd_lt_edp_3_24,
	&xe3plpd_lt_edp_4_32,
	&xe3plpd_lt_dp_hbr2,
	&xe3plpd_lt_edp_6_75,
	&xe3plpd_lt_dp_hbr3,
	NULL,
};
732
/* HDMI TMDS 25.2 MHz pixel clock PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_252 = {
	.clock = 25200,
	.config = {
		0x84,
		0x2d,
		0x0,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x0c, 0x15, 0x27, 0x60 },
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x8, 0x4, 0x98, 0x28 },
		{ 0x42, 0x0, 0x84, 0x10 },
		{ 0x80, 0x0f, 0xd9, 0xb5 },
		{ 0x86, 0x0, 0x0, 0x0 },
		{ 0x1, 0xa0, 0x1, 0x0 },
		{ 0x4b, 0x0, 0x0, 0x0 },
		{ 0x28, 0x0, 0x0, 0x0 },
		{ 0x0, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
786
/* HDMI TMDS 27.2 MHz pixel clock PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_272 = {
	.clock = 27200,
	.config = {
		0x84,
		0x2d,
		0x0,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x0b, 0x15, 0x26, 0xa0 },
		{ 0x60, 0x0, 0x0, 0x0 },
		{ 0x8, 0x4, 0x96, 0x28 },
		{ 0xfa, 0x0c, 0x84, 0x11 },
		{ 0x80, 0x0f, 0xd9, 0x53 },
		{ 0x86, 0x0, 0x0, 0x0 },
		{ 0x1, 0xa0, 0x1, 0x0 },
		{ 0x4b, 0x0, 0x0, 0x0 },
		{ 0x28, 0x0, 0x0, 0x0 },
		{ 0x0, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
840
/* HDMI TMDS 74.25 MHz pixel clock PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_742p5 = {
	.clock = 74250,
	.config = {
		0x84,
		0x2d,
		0x0,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x4, 0x15, 0x26, 0xa0 },
		{ 0x60, 0x0, 0x0, 0x0 },
		{ 0x8, 0x4, 0x88, 0x28 },
		{ 0xfa, 0x0c, 0x84, 0x11 },
		{ 0x80, 0x0f, 0xd9, 0x53 },
		{ 0x86, 0x0, 0x0, 0x0 },
		{ 0x1, 0xa0, 0x1, 0x0 },
		{ 0x4b, 0x0, 0x0, 0x0 },
		{ 0x28, 0x0, 0x0, 0x0 },
		{ 0x0, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
894
/* HDMI TMDS 148.5 MHz pixel clock PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_1p485 = {
	.clock = 148500,
	.config = {
		0x84,
		0x2d,
		0x0,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x2, 0x15, 0x26, 0xa0 },
		{ 0x60, 0x0, 0x0, 0x0 },
		{ 0x8, 0x4, 0x84, 0x28 },
		{ 0xfa, 0x0c, 0x84, 0x11 },
		{ 0x80, 0x0f, 0xd9, 0x53 },
		{ 0x86, 0x0, 0x0, 0x0 },
		{ 0x1, 0xa0, 0x1, 0x0 },
		{ 0x4b, 0x0, 0x0, 0x0 },
		{ 0x28, 0x0, 0x0, 0x0 },
		{ 0x0, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
948
/* HDMI TMDS 594 MHz pixel clock PLL state. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_5p94 = {
	.clock = 594000,
	.config = {
		0x84,
		0x2d,
		0x0,
	},
	.addr_msb = {
		0x87,
		0x87,
		0x87,
		0x87,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
		0x88,
	},
	.addr_lsb = {
		0x10,
		0x0c,
		0x14,
		0xe4,
		0x0c,
		0x10,
		0x14,
		0x18,
		0x48,
		0x40,
		0x4c,
		0x24,
		0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x0, 0x95, 0x26, 0xa0 },
		{ 0x60, 0x0, 0x0, 0x0 },
		{ 0x8, 0x4, 0x81, 0x28 },
		{ 0xfa, 0x0c, 0x84, 0x11 },
		{ 0x80, 0x0f, 0xd9, 0x53 },
		{ 0x86, 0x0, 0x0, 0x0 },
		{ 0x1, 0xa0, 0x1, 0x0 },
		{ 0x4b, 0x0, 0x0, 0x0 },
		{ 0x28, 0x0, 0x0, 0x0 },
		{ 0x0, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};
1002
/* NULL-terminated list of fixed HDMI TMDS clock PLL states. */
static const struct intel_lt_phy_pll_state * const xe3plpd_lt_hdmi_tables[] = {
	&xe3plpd_lt_hdmi_252,
	&xe3plpd_lt_hdmi_272,
	&xe3plpd_lt_hdmi_742p5,
	&xe3plpd_lt_hdmi_1p485,
	&xe3plpd_lt_hdmi_5p94,
	NULL,
};
1011
1012static u8 intel_lt_phy_get_owned_lane_mask(struct intel_encoder *encoder)
1013{
1014 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1015
1016 if (!intel_tc_port_in_dp_alt_mode(dig_port))
1017 return INTEL_LT_PHY_BOTH_LANES;
1018
1019 return intel_tc_port_max_lane_count(dig_port) > 2
1020 ? INTEL_LT_PHY_BOTH_LANES : INTEL_LT_PHY_LANE0;
1021}
1022
/* Read an 8-bit PHY VDR register through the shared CX0 message-bus helper. */
static u8 intel_lt_phy_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr)
{
	return intel_cx0_read(encoder, lane_mask, addr);
}
1027
/* Write an 8-bit PHY VDR register on all lanes in @lane_mask via CX0 helper. */
static void intel_lt_phy_write(struct intel_encoder *encoder,
			       u8 lane_mask, u16 addr, u8 data, bool committed)
{
	intel_cx0_write(encoder, lane_mask, addr, data, committed);
}
1033
/* Read-modify-write a PHY VDR register: clear @clear bits, then set @set. */
static void intel_lt_phy_rmw(struct intel_encoder *encoder,
			     u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
{
	intel_cx0_rmw(encoder, lane_mask, addr, clear, set, committed);
}
1039
1040static void intel_lt_phy_clear_status_p2p(struct intel_encoder *encoder,
1041 int lane)
1042{
1043 struct intel_display *display = to_intel_display(encoder);
1044
1045 intel_de_rmw(display,
1046 XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(encoder->port, lane),
1047 XELPDP_PORT_P2M_RESPONSE_READY, set: 0);
1048}
1049
1050static void
1051assert_dc_off(struct intel_display *display)
1052{
1053 bool enabled;
1054
1055 enabled = intel_display_power_is_enabled(display, domain: POWER_DOMAIN_DC_OFF);
1056 drm_WARN_ON(display->drm, !enabled);
1057}
1058
1059static int __intel_lt_phy_p2p_write_once(struct intel_encoder *encoder,
1060 int lane, u16 addr, u8 data,
1061 i915_reg_t mac_reg_addr,
1062 u8 expected_mac_val)
1063{
1064 struct intel_display *display = to_intel_display(encoder);
1065 enum port port = encoder->port;
1066 enum phy phy = intel_encoder_to_phy(encoder);
1067 int ack;
1068 u32 val;
1069
1070 if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
1071 XELPDP_PORT_P2P_TRANSACTION_PENDING,
1072 XELPDP_MSGBUS_TIMEOUT_MS)) {
1073 drm_dbg_kms(display->drm,
1074 "PHY %c Timeout waiting for previous transaction to complete. Resetting bus.\n",
1075 phy_name(phy));
1076 intel_cx0_bus_reset(encoder, lane);
1077 return -ETIMEDOUT;
1078 }
1079
1080 intel_de_rmw(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane), clear: 0, set: 0);
1081
1082 intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
1083 XELPDP_PORT_P2P_TRANSACTION_PENDING |
1084 XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED |
1085 XELPDP_PORT_M2P_DATA(data) |
1086 XELPDP_PORT_M2P_ADDRESS(addr));
1087
1088 ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, val: &val);
1089 if (ack < 0)
1090 return ack;
1091
1092 if (val & XELPDP_PORT_P2M_ERROR_SET) {
1093 drm_dbg_kms(display->drm,
1094 "PHY %c Error occurred during P2P write command. Status: 0x%x\n",
1095 phy_name(phy), val);
1096 intel_lt_phy_clear_status_p2p(encoder, lane);
1097 intel_cx0_bus_reset(encoder, lane);
1098 return -EINVAL;
1099 }
1100
1101 /*
1102 * RE-VISIT:
1103 * This needs to be added to give PHY time to set everything up this was a requirement
1104 * to get the display up and running
1105 * This is the time PHY takes to settle down after programming the PHY.
1106 */
1107 udelay(usec: 150);
1108 intel_clear_response_ready_flag(encoder, lane);
1109 intel_lt_phy_clear_status_p2p(encoder, lane);
1110
1111 return 0;
1112}
1113
1114static void __intel_lt_phy_p2p_write(struct intel_encoder *encoder,
1115 int lane, u16 addr, u8 data,
1116 i915_reg_t mac_reg_addr,
1117 u8 expected_mac_val)
1118{
1119 struct intel_display *display = to_intel_display(encoder);
1120 enum phy phy = intel_encoder_to_phy(encoder);
1121 int i, status;
1122
1123 assert_dc_off(display);
1124
1125 /* 3 tries is assumed to be enough to write successfully */
1126 for (i = 0; i < 3; i++) {
1127 status = __intel_lt_phy_p2p_write_once(encoder, lane, addr, data, mac_reg_addr,
1128 expected_mac_val);
1129
1130 if (status == 0)
1131 return;
1132 }
1133
1134 drm_err_once(display->drm,
1135 "PHY %c P2P Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
1136}
1137
1138static void intel_lt_phy_p2p_write(struct intel_encoder *encoder,
1139 u8 lane_mask, u16 addr, u8 data,
1140 i915_reg_t mac_reg_addr,
1141 u8 expected_mac_val)
1142{
1143 int lane;
1144
1145 for_each_lt_phy_lane_in_mask(lane_mask, lane)
1146 __intel_lt_phy_p2p_write(encoder, lane, addr, data, mac_reg_addr, expected_mac_val);
1147}
1148
/*
 * Configure PHY powerdown handling for this port. @lane_count is currently
 * unused — kept for interface symmetry with the lane helpers below.
 */
static void
intel_lt_phy_setup_powerdown(struct intel_encoder *encoder, u8 lane_count)
{
	/*
	 * The new PORT_BUF_CTL6 stuff for dc5 entry and exit needs to be handled
	 * by dmc firmware not explicitly mentioned in Bspec. This leaves this
	 * function as a wrapper only but keeping it expecting future changes.
	 */
	intel_cx0_setup_powerdown(encoder);
}
1159
/* Drive the lanes in @lane_mask through a powerdown state change to @state. */
static void
intel_lt_phy_powerdown_change_sequence(struct intel_encoder *encoder,
				       u8 lane_mask, u8 state)
{
	intel_cx0_powerdown_change_sequence(encoder, lane_mask, state);
}
1166
1167static void
1168intel_lt_phy_lane_reset(struct intel_encoder *encoder,
1169 u8 lane_count)
1170{
1171 struct intel_display *display = to_intel_display(encoder);
1172 enum port port = encoder->port;
1173 enum phy phy = intel_encoder_to_phy(encoder);
1174 u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
1175 u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
1176 ? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1)
1177 : XELPDP_LANE_PIPE_RESET(0);
1178 u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
1179 ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) |
1180 XELPDP_LANE_PHY_CURRENT_STATUS(1))
1181 : XELPDP_LANE_PHY_CURRENT_STATUS(0);
1182 u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
1183 ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
1184 XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
1185 : XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
1186
1187 intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
1188 XE3PLPD_MACCLK_RATE_MASK, XE3PLPD_MACCLK_RATE_DEF);
1189
1190 intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
1191 XE3PLPDP_PHY_MODE_MASK, XE3PLPDP_PHY_MODE_DP);
1192
1193 intel_lt_phy_setup_powerdown(encoder, lane_count);
1194 intel_lt_phy_powerdown_change_sequence(encoder, lane_mask: owned_lane_mask,
1195 XELPDP_P2_STATE_RESET);
1196
1197 intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
1198 XE3PLPD_MACCLK_RESET_0, set: 0);
1199
1200 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
1201 XELPDP_LANE_PCLK_PLL_REQUEST(0),
1202 XELPDP_LANE_PCLK_PLL_REQUEST(0));
1203
1204 if (intel_de_wait_for_set_ms(display, XELPDP_PORT_CLOCK_CTL(display, port),
1205 XELPDP_LANE_PCLK_PLL_ACK(0),
1206 XE3PLPD_MACCLK_TURNON_LATENCY_MS))
1207 drm_warn(display->drm, "PHY %c PLL MacCLK assertion ack not done\n",
1208 phy_name(phy));
1209
1210 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
1211 XELPDP_FORWARD_CLOCK_UNGATE,
1212 XELPDP_FORWARD_CLOCK_UNGATE);
1213
1214 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
1215 clear: lane_pipe_reset | lane_phy_pulse_status, set: 0);
1216
1217 if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
1218 mask: lane_phy_current_status,
1219 XE3PLPD_RESET_END_LATENCY_MS))
1220 drm_warn(display->drm, "PHY %c failed to bring out of lane reset\n",
1221 phy_name(phy));
1222
1223 if (intel_de_wait_for_set_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
1224 mask: lane_phy_pulse_status,
1225 XE3PLPD_RATE_CALIB_DONE_LATENCY_MS))
1226 drm_warn(display->drm, "PHY %c PLL rate not changed\n",
1227 phy_name(phy));
1228
1229 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), clear: lane_phy_pulse_status, set: 0);
1230}
1231
1232static void
1233intel_lt_phy_program_port_clock_ctl(struct intel_encoder *encoder,
1234 const struct intel_crtc_state *crtc_state,
1235 bool lane_reversal)
1236{
1237 struct intel_display *display = to_intel_display(encoder);
1238 u32 val = 0;
1239
1240 intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
1241 XELPDP_PORT_REVERSAL,
1242 set: lane_reversal ? XELPDP_PORT_REVERSAL : 0);
1243
1244 val |= XELPDP_FORWARD_CLOCK_UNGATE;
1245
1246 /*
1247 * We actually mean MACCLK here and not MAXPCLK when using LT Phy
1248 * but since the register bits still remain the same we use
1249 * the same definition
1250 */
1251 if (intel_crtc_has_type(crtc_state, type: INTEL_OUTPUT_HDMI) &&
1252 intel_hdmi_is_frl(clock: crtc_state->port_clock))
1253 val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
1254 else
1255 val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
1256
1257 /* DP2.0 10G and 20G rates enable MPLLA*/
1258 if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
1259 val |= XELPDP_SSC_ENABLE_PLLA;
1260 else
1261 val |= crtc_state->dpll_hw_state.ltpll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
1262
1263 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
1264 XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
1265 XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA |
1266 XELPDP_SSC_ENABLE_PLLB, set: val);
1267}
1268
1269static u32 intel_lt_phy_get_dp_clock(u8 rate)
1270{
1271 switch (rate) {
1272 case 0:
1273 return 162000;
1274 case 1:
1275 return 270000;
1276 case 2:
1277 return 540000;
1278 case 3:
1279 return 810000;
1280 case 4:
1281 return 216000;
1282 case 5:
1283 return 243000;
1284 case 6:
1285 return 324000;
1286 case 7:
1287 return 432000;
1288 case 8:
1289 return 1000000;
1290 case 9:
1291 return 1350000;
1292 case 10:
1293 return 2000000;
1294 case 11:
1295 return 675000;
1296 default:
1297 MISSING_CASE(rate);
1298 return 0;
1299 }
1300}
1301
1302static bool
1303intel_lt_phy_config_changed(struct intel_encoder *encoder,
1304 const struct intel_crtc_state *crtc_state)
1305{
1306 u8 val, rate;
1307 u32 clock;
1308
1309 val = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0,
1310 LT_PHY_VDR_0_CONFIG);
1311 rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK, val);
1312
1313 /*
1314 * The only time we do not reconfigure the PLL is when we are
1315 * using 1.62 Gbps clock since PHY PLL defaults to that
1316 * otherwise we always need to reconfigure it.
1317 */
1318 if (intel_crtc_has_dp_encoder(crtc_state)) {
1319 clock = intel_lt_phy_get_dp_clock(rate);
1320 if (crtc_state->port_clock == 1620000 && crtc_state->port_clock == clock)
1321 return false;
1322 }
1323
1324 return true;
1325}
1326
1327static intel_wakeref_t intel_lt_phy_transaction_begin(struct intel_encoder *encoder)
1328{
1329 struct intel_display *display = to_intel_display(encoder);
1330 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1331 intel_wakeref_t wakeref;
1332
1333 intel_psr_pause(intel_dp);
1334 wakeref = intel_display_power_get(display, domain: POWER_DOMAIN_DC_OFF);
1335
1336 return wakeref;
1337}
1338
1339static void intel_lt_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
1340{
1341 struct intel_display *display = to_intel_display(encoder);
1342 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1343
1344 intel_psr_resume(intel_dp);
1345 intel_display_power_put(display, domain: POWER_DOMAIN_DC_OFF, wakeref);
1346}
1347
1348static const struct intel_lt_phy_pll_state * const *
1349intel_lt_phy_pll_tables_get(struct intel_crtc_state *crtc_state,
1350 struct intel_encoder *encoder)
1351{
1352 if (intel_crtc_has_dp_encoder(crtc_state)) {
1353 if (intel_crtc_has_type(crtc_state, type: INTEL_OUTPUT_EDP))
1354 return xe3plpd_lt_edp_tables;
1355
1356 return xe3plpd_lt_dp_tables;
1357 } else if (intel_crtc_has_type(crtc_state, type: INTEL_OUTPUT_HDMI)) {
1358 return xe3plpd_lt_hdmi_tables;
1359 }
1360
1361 MISSING_CASE(encoder->type);
1362 return NULL;
1363}
1364
1365static bool
1366intel_lt_phy_pll_is_ssc_enabled(struct intel_crtc_state *crtc_state,
1367 struct intel_encoder *encoder)
1368{
1369 struct intel_display *display = to_intel_display(encoder);
1370
1371 if (intel_crtc_has_dp_encoder(crtc_state)) {
1372 if (intel_panel_use_ssc(display)) {
1373 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1374
1375 return (intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
1376 }
1377 }
1378
1379 return false;
1380}
1381
1382static u64 mul_q32_u32(u64 a_q32, u32 b)
1383{
1384 u64 p0, p1, carry, result;
1385 u64 x_hi = a_q32 >> 32;
1386 u64 x_lo = a_q32 & 0xFFFFFFFFULL;
1387
1388 p0 = x_lo * (u64)b;
1389 p1 = x_hi * (u64)b;
1390 carry = p0 >> 32;
1391 result = (p1 << 32) + (carry << 32) + (p0 & 0xFFFFFFFFULL);
1392
1393 return result;
1394}
1395
1396static bool
1397calculate_target_dco_and_loop_cnt(u32 frequency_khz, u64 *target_dco_mhz, u32 *loop_cnt)
1398{
1399 u32 ppm_value = 1;
1400 u32 dco_min_freq = DCO_MIN_FREQ_MHZ;
1401 u32 dco_max_freq = 16200;
1402 u32 dco_min_freq_low = 10000;
1403 u32 dco_max_freq_low = 12000;
1404 u64 val = 0;
1405 u64 refclk_khz = REF_CLK_KHZ;
1406 u64 m2div = 0;
1407 u64 val_with_frac = 0;
1408 u64 ppm = 0;
1409 u64 temp0 = 0, temp1, scale;
1410 int ppm_cnt, dco_count, y;
1411
1412 for (ppm_cnt = 0; ppm_cnt < 5; ppm_cnt++) {
1413 ppm_value = ppm_cnt == 2 ? 2 : 1;
1414 for (dco_count = 0; dco_count < 2; dco_count++) {
1415 if (dco_count == 1) {
1416 dco_min_freq = dco_min_freq_low;
1417 dco_max_freq = dco_max_freq_low;
1418 }
1419 for (y = 2; y <= 255; y += 2) {
1420 val = div64_u64(dividend: (u64)y * frequency_khz, divisor: 200);
1421 m2div = div64_u64(dividend: ((u64)(val) << 32), divisor: refclk_khz);
1422 m2div = mul_q32_u32(a_q32: m2div, b: 500);
1423 val_with_frac = mul_q32_u32(a_q32: m2div, b: refclk_khz);
1424 val_with_frac = div64_u64(dividend: val_with_frac, divisor: 500);
1425 temp1 = Q32_TO_INT(val_with_frac);
1426 temp0 = (temp1 > val) ? (temp1 - val) :
1427 (val - temp1);
1428 ppm = div64_u64(dividend: temp0, divisor: val);
1429 if (temp1 >= dco_min_freq &&
1430 temp1 <= dco_max_freq &&
1431 ppm < ppm_value) {
1432 /* Round to two places */
1433 scale = (1ULL << 32) / 100;
1434 temp0 = DIV_ROUND_UP_ULL(val_with_frac,
1435 scale);
1436 *target_dco_mhz = temp0 * scale;
1437 *loop_cnt = y;
1438 return true;
1439 }
1440 }
1441 }
1442 }
1443
1444 return false;
1445}
1446
1447static void set_phy_vdr_addresses(struct lt_phy_params *p, int pll_type)
1448{
1449 p->pll_reg4.addr = PLL_REG_ADDR(PLL_REG4_ADDR, pll_type);
1450 p->pll_reg3.addr = PLL_REG_ADDR(PLL_REG3_ADDR, pll_type);
1451 p->pll_reg5.addr = PLL_REG_ADDR(PLL_REG5_ADDR, pll_type);
1452 p->pll_reg57.addr = PLL_REG_ADDR(PLL_REG57_ADDR, pll_type);
1453 p->lf.addr = PLL_REG_ADDR(PLL_LF_ADDR, pll_type);
1454 p->tdc.addr = PLL_REG_ADDR(PLL_TDC_ADDR, pll_type);
1455 p->ssc.addr = PLL_REG_ADDR(PLL_SSC_ADDR, pll_type);
1456 p->bias2.addr = PLL_REG_ADDR(PLL_BIAS2_ADDR, pll_type);
1457 p->bias_trim.addr = PLL_REG_ADDR(PLL_BIAS_TRIM_ADDR, pll_type);
1458 p->dco_med.addr = PLL_REG_ADDR(PLL_DCO_MED_ADDR, pll_type);
1459 p->dco_fine.addr = PLL_REG_ADDR(PLL_DCO_FINE_ADDR, pll_type);
1460 p->ssc_inj.addr = PLL_REG_ADDR(PLL_SSC_INJ_ADDR, pll_type);
1461 p->surv_bonus.addr = PLL_REG_ADDR(PLL_SURV_BONUS_ADDR, pll_type);
1462}
1463
1464static void compute_ssc(struct lt_phy_params *p, u32 ana_cfg)
1465{
1466 int ssc_stepsize = 0;
1467 int ssc_steplen = 0;
1468 int ssc_steplog = 0;
1469
1470 p->ssc.val = (1 << 31) | (ana_cfg << 24) | (ssc_steplog << 16) |
1471 (ssc_stepsize << 8) | ssc_steplen;
1472}
1473
1474static void compute_bias2(struct lt_phy_params *p)
1475{
1476 u32 ssc_en_local = 0;
1477 u64 dynctrl_ovrd_en = 0;
1478
1479 p->bias2.val = (dynctrl_ovrd_en << 31) | (ssc_en_local << 30) |
1480 (1 << 23) | (1 << 24) | (32 << 16) | (1 << 8);
1481}
1482
1483static void compute_tdc(struct lt_phy_params *p, u64 tdc_fine)
1484{
1485 u32 settling_time = 15;
1486 u32 bias_ovr_en = 1;
1487 u32 coldstart = 1;
1488 u32 true_lock = 2;
1489 u32 early_lock = 1;
1490 u32 lock_ovr_en = 1;
1491 u32 lock_thr = tdc_fine ? 3 : 5;
1492 u32 unlock_thr = tdc_fine ? 5 : 11;
1493
1494 p->tdc.val = (u32)((2 << 30) + (settling_time << 16) + (bias_ovr_en << 15) +
1495 (lock_ovr_en << 14) + (coldstart << 12) + (true_lock << 10) +
1496 (early_lock << 8) + (unlock_thr << 4) + lock_thr);
1497}
1498
1499static void compute_dco_med(struct lt_phy_params *p)
1500{
1501 u32 cselmed_en = 0;
1502 u32 cselmed_dyn_adj = 0;
1503 u32 cselmed_ratio = 39;
1504 u32 cselmed_thr = 8;
1505
1506 p->dco_med.val = (cselmed_en << 31) + (cselmed_dyn_adj << 30) +
1507 (cselmed_ratio << 24) + (cselmed_thr << 21);
1508}
1509
1510static void compute_dco_fine(struct lt_phy_params *p, u32 dco_12g)
1511{
1512 u32 dco_fine0_tune_2_0 = 0;
1513 u32 dco_fine1_tune_2_0 = 0;
1514 u32 dco_fine2_tune_2_0 = 0;
1515 u32 dco_fine3_tune_2_0 = 0;
1516 u32 dco_dith0_tune_2_0 = 0;
1517 u32 dco_dith1_tune_2_0 = 0;
1518
1519 dco_fine0_tune_2_0 = dco_12g ? 4 : 3;
1520 dco_fine1_tune_2_0 = 2;
1521 dco_fine2_tune_2_0 = dco_12g ? 2 : 1;
1522 dco_fine3_tune_2_0 = 5;
1523 dco_dith0_tune_2_0 = dco_12g ? 4 : 3;
1524 dco_dith1_tune_2_0 = 2;
1525
1526 p->dco_fine.val = (dco_dith1_tune_2_0 << 19) +
1527 (dco_dith0_tune_2_0 << 16) +
1528 (dco_fine3_tune_2_0 << 11) +
1529 (dco_fine2_tune_2_0 << 8) +
1530 (dco_fine1_tune_2_0 << 3) +
1531 dco_fine0_tune_2_0;
1532}
1533
1534int
1535intel_lt_phy_calculate_hdmi_state(struct intel_lt_phy_pll_state *lt_state,
1536 u32 frequency_khz)
1537{
1538#define DATA_ASSIGN(i, pll_reg) \
1539 do { \
1540 lt_state->data[i][0] = (u8)((((pll_reg).val) & 0xFF000000) >> 24); \
1541 lt_state->data[i][1] = (u8)((((pll_reg).val) & 0x00FF0000) >> 16); \
1542 lt_state->data[i][2] = (u8)((((pll_reg).val) & 0x0000FF00) >> 8); \
1543 lt_state->data[i][3] = (u8)((((pll_reg).val) & 0x000000FF)); \
1544 } while (0)
1545#define ADDR_ASSIGN(i, pll_reg) \
1546 do { \
1547 lt_state->addr_msb[i] = ((pll_reg).addr >> 8) & 0xFF; \
1548 lt_state->addr_lsb[i] = (pll_reg).addr & 0xFF; \
1549 } while (0)
1550
1551 bool found = false;
1552 struct lt_phy_params p;
1553 u32 dco_fmin = DCO_MIN_FREQ_MHZ;
1554 u64 refclk_khz = REF_CLK_KHZ;
1555 u32 refclk_mhz_int = REF_CLK_KHZ / 1000;
1556 u64 m2div = 0;
1557 u64 target_dco_mhz = 0;
1558 u64 tdc_fine, tdc_targetcnt;
1559 u64 feedfwd_gain ,feedfwd_cal_en;
1560 u64 tdc_res = 30;
1561 u32 prop_coeff;
1562 u32 int_coeff;
1563 u32 ndiv = 1;
1564 u32 m1div = 1, m2div_int, m2div_frac;
1565 u32 frac_en;
1566 u32 ana_cfg;
1567 u32 loop_cnt = 0;
1568 u32 gain_ctrl = 2;
1569 u32 postdiv = 0;
1570 u32 dco_12g = 0;
1571 u32 pll_type = 0;
1572 u32 d1 = 2, d3 = 5, d4 = 0, d5 = 0;
1573 u32 d6 = 0, d6_new = 0;
1574 u32 d7, d8 = 0;
1575 u32 bonus_7_0 = 0;
1576 u32 csel2fo = 11;
1577 u32 csel2fo_ovrd_en = 1;
1578 u64 temp0, temp1, temp2, temp3;
1579
1580 p.surv_bonus.val = (bonus_7_0 << 16);
1581 p.pll_reg4.val = (refclk_mhz_int << 17) +
1582 (ndiv << 9) + (1 << 4);
1583 p.bias_trim.val = (csel2fo_ovrd_en << 30) + (csel2fo << 24);
1584 p.ssc_inj.val = 0;
1585 found = calculate_target_dco_and_loop_cnt(frequency_khz, target_dco_mhz: &target_dco_mhz, loop_cnt: &loop_cnt);
1586 if (!found)
1587 return -EINVAL;
1588
1589 m2div = div64_u64(dividend: target_dco_mhz, divisor: (refclk_khz * ndiv * m1div));
1590 m2div = mul_q32_u32(a_q32: m2div, b: 1000);
1591 if (Q32_TO_INT(m2div) > 511)
1592 return -EINVAL;
1593
1594 m2div_int = (u32)Q32_TO_INT(m2div);
1595 m2div_frac = (u32)(Q32_TO_FRAC(m2div));
1596 frac_en = (m2div_frac > 0) ? 1 : 0;
1597
1598 if (frac_en > 0)
1599 tdc_res = 70;
1600 else
1601 tdc_res = 36;
1602 tdc_fine = tdc_res > 50 ? 1 : 0;
1603 temp0 = tdc_res * 40 * 11;
1604 temp1 = div64_u64(dividend: ((4 * TDC_RES_MULTIPLIER) + temp0) * 500, divisor: temp0 * refclk_khz);
1605 temp2 = div64_u64(dividend: temp0 * refclk_khz, divisor: 1000);
1606 temp3 = div64_u64(dividend: ((8 * TDC_RES_MULTIPLIER) + temp2), divisor: temp2);
1607 tdc_targetcnt = tdc_res < 50 ? (int)(temp1) : (int)(temp3);
1608 tdc_targetcnt = (int)(tdc_targetcnt / 2);
1609 temp0 = mul_q32_u32(a_q32: target_dco_mhz, b: tdc_res);
1610 temp0 >>= 32;
1611 feedfwd_gain = (m2div_frac > 0) ? div64_u64(dividend: m1div * TDC_RES_MULTIPLIER, divisor: temp0) : 0;
1612 feedfwd_cal_en = frac_en;
1613
1614 temp0 = (u32)Q32_TO_INT(target_dco_mhz);
1615 prop_coeff = (temp0 >= dco_fmin) ? 3 : 4;
1616 int_coeff = (temp0 >= dco_fmin) ? 7 : 8;
1617 ana_cfg = (temp0 >= dco_fmin) ? 8 : 6;
1618 dco_12g = (temp0 >= dco_fmin) ? 0 : 1;
1619
1620 if (temp0 > 12960)
1621 d7 = 10;
1622 else
1623 d7 = 8;
1624
1625 d8 = loop_cnt / 2;
1626 d4 = d8 * 2;
1627
1628 /* Compute pll_reg3,5,57 & lf */
1629 p.pll_reg3.val = (u32)((d4 << 21) + (d3 << 18) + (d1 << 15) + (m2div_int << 5));
1630 p.pll_reg5.val = m2div_frac;
1631 postdiv = (d5 == 0) ? 9 : d5;
1632 d6_new = (d6 == 0) ? 40 : d6;
1633 p.pll_reg57.val = (d7 << 24) + (postdiv << 15) + (d8 << 7) + d6_new;
1634 p.lf.val = (u32)((frac_en << 31) + (1 << 30) + (frac_en << 29) +
1635 (feedfwd_cal_en << 28) + (tdc_fine << 27) +
1636 (gain_ctrl << 24) + (feedfwd_gain << 16) +
1637 (int_coeff << 12) + (prop_coeff << 8) + tdc_targetcnt);
1638
1639 compute_ssc(p: &p, ana_cfg);
1640 compute_bias2(p: &p);
1641 compute_tdc(p: &p, tdc_fine);
1642 compute_dco_med(p: &p);
1643 compute_dco_fine(p: &p, dco_12g);
1644
1645 pll_type = ((frequency_khz == 10000) || (frequency_khz == 20000) ||
1646 (frequency_khz == 2500) || (dco_12g == 1)) ? 0 : 1;
1647 set_phy_vdr_addresses(p: &p, pll_type);
1648
1649 lt_state->config[0] = 0x84;
1650 lt_state->config[1] = 0x2d;
1651 ADDR_ASSIGN(0, p.pll_reg4);
1652 ADDR_ASSIGN(1, p.pll_reg3);
1653 ADDR_ASSIGN(2, p.pll_reg5);
1654 ADDR_ASSIGN(3, p.pll_reg57);
1655 ADDR_ASSIGN(4, p.lf);
1656 ADDR_ASSIGN(5, p.tdc);
1657 ADDR_ASSIGN(6, p.ssc);
1658 ADDR_ASSIGN(7, p.bias2);
1659 ADDR_ASSIGN(8, p.bias_trim);
1660 ADDR_ASSIGN(9, p.dco_med);
1661 ADDR_ASSIGN(10, p.dco_fine);
1662 ADDR_ASSIGN(11, p.ssc_inj);
1663 ADDR_ASSIGN(12, p.surv_bonus);
1664 DATA_ASSIGN(0, p.pll_reg4);
1665 DATA_ASSIGN(1, p.pll_reg3);
1666 DATA_ASSIGN(2, p.pll_reg5);
1667 DATA_ASSIGN(3, p.pll_reg57);
1668 DATA_ASSIGN(4, p.lf);
1669 DATA_ASSIGN(5, p.tdc);
1670 DATA_ASSIGN(6, p.ssc);
1671 DATA_ASSIGN(7, p.bias2);
1672 DATA_ASSIGN(8, p.bias_trim);
1673 DATA_ASSIGN(9, p.dco_med);
1674 DATA_ASSIGN(10, p.dco_fine);
1675 DATA_ASSIGN(11, p.ssc_inj);
1676 DATA_ASSIGN(12, p.surv_bonus);
1677
1678 return 0;
1679}
1680
1681static int
1682intel_lt_phy_calc_hdmi_port_clock(const struct intel_crtc_state *crtc_state)
1683{
1684#define REGVAL(i) ( \
1685 (lt_state->data[i][3]) | \
1686 (lt_state->data[i][2] << 8) | \
1687 (lt_state->data[i][1] << 16) | \
1688 (lt_state->data[i][0] << 24) \
1689)
1690
1691 struct intel_display *display = to_intel_display(crtc_state);
1692 const struct intel_lt_phy_pll_state *lt_state =
1693 &crtc_state->dpll_hw_state.ltpll;
1694 int clk = 0;
1695 u32 d8, pll_reg_5, pll_reg_3, pll_reg_57, m2div_frac, m2div_int;
1696 u64 temp0, temp1;
1697 /*
1698 * The algorithm uses '+' to combine bitfields when
1699 * constructing PLL_reg3 and PLL_reg57:
1700 * PLL_reg57 = (D7 << 24) + (postdiv << 15) + (D8 << 7) + D6_new;
1701 * PLL_reg3 = (D4 << 21) + (D3 << 18) + (D1 << 15) + (m2div_int << 5);
1702 *
1703 * However, this is likely intended to be a bitwise OR operation,
1704 * as each field occupies distinct, non-overlapping bits in the register.
1705 *
1706 * PLL_reg57 is composed of following fields packed into a 32-bit value:
1707 * - D7: max value 10 -> fits in 4 bits -> placed at bits 24-27
1708 * - postdiv: max value 9 -> fits in 4 bits -> placed at bits 15-18
1709 * - D8: derived from loop_cnt / 2, max 127 -> fits in 7 bits
1710 * (though 8 bits are given to it) -> placed at bits 7-14
1711 * - D6_new: fits in lower 7 bits -> placed at bits 0-6
1712 * PLL_reg57 = (D7 << 24) | (postdiv << 15) | (D8 << 7) | D6_new;
1713 *
1714 * Similarly, PLL_reg3 is packed as:
1715 * - D4: max value 256 -> fits in 9 bits -> placed at bits 21-29
1716 * - D3: max value 9 -> fits in 4 bits -> placed at bits 18-21
1717 * - D1: max value 2 -> fits in 2 bits -> placed at bits 15-16
1718 * - m2div_int: max value 511 -> fits in 9 bits (10 bits allocated)
1719 * -> placed at bits 5-14
1720 * PLL_reg3 = (D4 << 21) | (D3 << 18) | (D1 << 15) | (m2div_int << 5);
1721 */
1722 pll_reg_5 = REGVAL(2);
1723 pll_reg_3 = REGVAL(1);
1724 pll_reg_57 = REGVAL(3);
1725 m2div_frac = pll_reg_5;
1726
1727 /*
1728 * From forward algorithm we know
1729 * m2div = 2 * m2
1730 * val = y * frequency * 5
1731 * So now,
1732 * frequency = (m2 * 2 * refclk_khz / (d8 * 10))
1733 * frequency = (m2div * refclk_khz / (d8 * 10))
1734 */
1735 d8 = (pll_reg_57 & REG_GENMASK(14, 7)) >> 7;
1736 if (d8 == 0) {
1737 drm_WARN_ON(display->drm,
1738 "Invalid port clock using lowest HDMI portclock\n");
1739 return xe3plpd_lt_hdmi_252.clock;
1740 }
1741 m2div_int = (pll_reg_3 & REG_GENMASK(14, 5)) >> 5;
1742 temp0 = ((u64)m2div_frac * REF_CLK_KHZ) >> 32;
1743 temp1 = (u64)m2div_int * REF_CLK_KHZ;
1744
1745 clk = div_u64(dividend: (temp1 + temp0), divisor: d8 * 10);
1746
1747 return clk;
1748}
1749
1750int
1751intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
1752 const struct intel_crtc_state *crtc_state)
1753{
1754 int clk;
1755 const struct intel_lt_phy_pll_state *lt_state =
1756 &crtc_state->dpll_hw_state.ltpll;
1757 u8 mode, rate;
1758
1759 mode = REG_FIELD_GET8(LT_PHY_VDR_MODE_ENCODING_MASK,
1760 lt_state->config[0]);
1761 /*
1762 * For edp/dp read the clock value from the tables
1763 * and return the clock as the algorithm used for
1764 * calculating the port clock does not exactly matches
1765 * with edp/dp clock.
1766 */
1767 if (mode == MODE_DP) {
1768 rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK,
1769 lt_state->config[0]);
1770 clk = intel_lt_phy_get_dp_clock(rate);
1771 } else {
1772 clk = intel_lt_phy_calc_hdmi_port_clock(crtc_state);
1773 }
1774
1775 return clk;
1776}
1777
1778int
1779intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
1780 struct intel_encoder *encoder)
1781{
1782 const struct intel_lt_phy_pll_state * const *tables;
1783 int i;
1784
1785 tables = intel_lt_phy_pll_tables_get(crtc_state, encoder);
1786 if (!tables)
1787 return -EINVAL;
1788
1789 for (i = 0; tables[i]; i++) {
1790 if (crtc_state->port_clock == tables[i]->clock) {
1791 crtc_state->dpll_hw_state.ltpll = *tables[i];
1792 if (intel_crtc_has_dp_encoder(crtc_state)) {
1793 if (intel_crtc_has_type(crtc_state, type: INTEL_OUTPUT_EDP))
1794 crtc_state->dpll_hw_state.ltpll.config[2] = 1;
1795 }
1796 crtc_state->dpll_hw_state.ltpll.ssc_enabled =
1797 intel_lt_phy_pll_is_ssc_enabled(crtc_state, encoder);
1798 return 0;
1799 }
1800 }
1801
1802 if (intel_crtc_has_type(crtc_state, type: INTEL_OUTPUT_HDMI)) {
1803 return intel_lt_phy_calculate_hdmi_state(lt_state: &crtc_state->dpll_hw_state.ltpll,
1804 frequency_khz: crtc_state->port_clock);
1805 }
1806
1807 return -EINVAL;
1808}
1809
1810static void
1811intel_lt_phy_program_pll(struct intel_encoder *encoder,
1812 const struct intel_crtc_state *crtc_state)
1813{
1814 u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
1815 int i, j, k;
1816
1817 intel_lt_phy_write(encoder, lane_mask: owned_lane_mask, LT_PHY_VDR_0_CONFIG,
1818 data: crtc_state->dpll_hw_state.ltpll.config[0], MB_WRITE_COMMITTED);
1819 intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG,
1820 data: crtc_state->dpll_hw_state.ltpll.config[1], MB_WRITE_COMMITTED);
1821 intel_lt_phy_write(encoder, lane_mask: owned_lane_mask, LT_PHY_VDR_2_CONFIG,
1822 data: crtc_state->dpll_hw_state.ltpll.config[2], MB_WRITE_COMMITTED);
1823
1824 for (i = 0; i <= 12; i++) {
1825 intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_MSB(i),
1826 data: crtc_state->dpll_hw_state.ltpll.addr_msb[i],
1827 MB_WRITE_COMMITTED);
1828 intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_LSB(i),
1829 data: crtc_state->dpll_hw_state.ltpll.addr_lsb[i],
1830 MB_WRITE_COMMITTED);
1831
1832 for (j = 3, k = 0; j >= 0; j--, k++)
1833 intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0,
1834 LT_PHY_VDR_X_DATAY(i, j),
1835 data: crtc_state->dpll_hw_state.ltpll.data[i][k],
1836 MB_WRITE_COMMITTED);
1837 }
1838}
1839
1840static void
1841intel_lt_phy_enable_disable_tx(struct intel_encoder *encoder,
1842 const struct intel_crtc_state *crtc_state)
1843{
1844 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1845 bool lane_reversal = dig_port->lane_reversal;
1846 u8 lane_count = crtc_state->lane_count;
1847 bool is_dp_alt =
1848 intel_tc_port_in_dp_alt_mode(dig_port);
1849 enum intel_tc_pin_assignment tc_pin =
1850 intel_tc_port_get_pin_assignment(dig_port);
1851 u8 transmitter_mask = 0;
1852
1853 /*
1854 * We have a two transmitters per lane and total of 2 PHY lanes so a total
1855 * of 4 transmitters. We prepare a mask of the lanes that need to be activated
1856 * and the transmitter which need to be activated for each lane. TX 0,1 correspond
1857 * to LANE0 and TX 2, 3 correspond to LANE1.
1858 */
1859
1860 switch (lane_count) {
1861 case 1:
1862 transmitter_mask = lane_reversal ? REG_BIT8(3) : REG_BIT8(0);
1863 if (is_dp_alt) {
1864 if (tc_pin == INTEL_TC_PIN_ASSIGNMENT_D)
1865 transmitter_mask = REG_BIT8(0);
1866 else
1867 transmitter_mask = REG_BIT8(1);
1868 }
1869 break;
1870 case 2:
1871 transmitter_mask = lane_reversal ? REG_GENMASK8(3, 2) : REG_GENMASK8(1, 0);
1872 if (is_dp_alt)
1873 transmitter_mask = REG_GENMASK8(1, 0);
1874 break;
1875 case 3:
1876 transmitter_mask = lane_reversal ? REG_GENMASK8(3, 1) : REG_GENMASK8(2, 0);
1877 if (is_dp_alt)
1878 transmitter_mask = REG_GENMASK8(2, 0);
1879 break;
1880 case 4:
1881 transmitter_mask = REG_GENMASK8(3, 0);
1882 break;
1883 default:
1884 MISSING_CASE(lane_count);
1885 transmitter_mask = REG_GENMASK8(3, 0);
1886 break;
1887 }
1888
1889 if (transmitter_mask & BIT(0)) {
1890 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0),
1891 LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0),
1892 LT_PHY_TX_LANE_ENABLE);
1893 } else {
1894 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0),
1895 data: 0, LT_PHY_TXY_CTL10_MAC(0), expected_mac_val: 0);
1896 }
1897
1898 if (transmitter_mask & BIT(1)) {
1899 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1),
1900 LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1),
1901 LT_PHY_TX_LANE_ENABLE);
1902 } else {
1903 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1),
1904 data: 0, LT_PHY_TXY_CTL10_MAC(1), expected_mac_val: 0);
1905 }
1906
1907 if (transmitter_mask & BIT(2)) {
1908 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0),
1909 LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0),
1910 LT_PHY_TX_LANE_ENABLE);
1911 } else {
1912 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0),
1913 data: 0, LT_PHY_TXY_CTL10_MAC(0), expected_mac_val: 0);
1914 }
1915
1916 if (transmitter_mask & BIT(3)) {
1917 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1),
1918 LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1),
1919 LT_PHY_TX_LANE_ENABLE);
1920 } else {
1921 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1),
1922 data: 0, LT_PHY_TXY_CTL10_MAC(1), expected_mac_val: 0);
1923 }
1924}
1925
1926void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
1927 const struct intel_crtc_state *crtc_state)
1928{
1929 struct intel_display *display = to_intel_display(encoder);
1930 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1931 bool lane_reversal = dig_port->lane_reversal;
1932 u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
1933 enum phy phy = intel_encoder_to_phy(encoder);
1934 enum port port = encoder->port;
1935 intel_wakeref_t wakeref = 0;
1936 u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
1937 ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
1938 XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
1939 : XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
1940 u8 rate_update;
1941
1942 wakeref = intel_lt_phy_transaction_begin(encoder);
1943
1944 /* 1. Enable MacCLK at default 162 MHz frequency. */
1945 intel_lt_phy_lane_reset(encoder, lane_count: crtc_state->lane_count);
1946
1947 /* 2. Program PORT_CLOCK_CTL register to configure clock muxes, gating, and SSC. */
1948 intel_lt_phy_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
1949
1950 /* 3. Change owned PHY lanes power to Ready state. */
1951 intel_lt_phy_powerdown_change_sequence(encoder, lane_mask: owned_lane_mask,
1952 XELPDP_P2_STATE_READY);
1953
1954 /*
1955 * 4. Read the PHY message bus VDR register PHY_VDR_0_Config check enabled PLL type,
1956 * encoded rate and encoded mode.
1957 */
1958 if (intel_lt_phy_config_changed(encoder, crtc_state)) {
1959 /*
1960 * 5. Program the PHY internal PLL registers over PHY message bus for the desired
1961 * frequency and protocol type
1962 */
1963 intel_lt_phy_program_pll(encoder, crtc_state);
1964
1965 /* 6. Use the P2P transaction flow */
1966 /*
1967 * 6.1. Set the PHY VDR register 0xCC4[Rate Control VDR Update] = 1 over PHY message
1968 * bus for Owned PHY Lanes.
1969 */
1970 /*
1971 * 6.2. Poll for P2P Transaction Ready = "1" and read the MAC message bus VDR
1972 * register at offset 0xC00 for Owned PHY Lanes*.
1973 */
1974 /* 6.3. Clear P2P transaction Ready bit. */
1975 intel_lt_phy_p2p_write(encoder, lane_mask: owned_lane_mask, LT_PHY_RATE_UPDATE,
1976 LT_PHY_RATE_CONTROL_VDR_UPDATE, LT_PHY_MAC_VDR,
1977 LT_PHY_PCLKIN_GATE);
1978
1979 /* 7. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */
1980 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
1981 XELPDP_LANE_PCLK_PLL_REQUEST(0), set: 0);
1982
1983 /* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */
1984 if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, port),
1985 XELPDP_LANE_PCLK_PLL_ACK(0),
1986 XE3PLPD_MACCLK_TURNOFF_LATENCY_US))
1987 drm_warn(display->drm, "PHY %c PLL MacCLK ack deassertion timeout\n",
1988 phy_name(phy));
1989
1990 /*
1991 * 9. Follow the Display Voltage Frequency Switching - Sequence Before Frequency
1992 * Change. We handle this step in bxt_set_cdclk().
1993 */
1994 /* 10. Program DDI_CLK_VALFREQ to match intended DDI clock frequency. */
1995 intel_de_write(display, DDI_CLK_VALFREQ(encoder->port),
1996 val: crtc_state->port_clock);
1997
1998 /* 11. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 1. */
1999 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
2000 XELPDP_LANE_PCLK_PLL_REQUEST(0),
2001 XELPDP_LANE_PCLK_PLL_REQUEST(0));
2002
2003 /* 12. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 1. */
2004 if (intel_de_wait_for_set_ms(display, XELPDP_PORT_CLOCK_CTL(display, port),
2005 XELPDP_LANE_PCLK_PLL_ACK(0),
2006 XE3PLPD_MACCLK_TURNON_LATENCY_MS))
2007 drm_warn(display->drm, "PHY %c PLL MacCLK ack assertion timeout\n",
2008 phy_name(phy));
2009
2010 /*
2011 * 13. Ungate the forward clock by setting
2012 * PORT_CLOCK_CTL[Forward Clock Ungate] = 1.
2013 */
2014 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
2015 XELPDP_FORWARD_CLOCK_UNGATE,
2016 XELPDP_FORWARD_CLOCK_UNGATE);
2017
2018 /* 14. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */
2019 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
2020 clear: lane_phy_pulse_status,
2021 set: lane_phy_pulse_status);
2022 /*
2023 * 15. Clear the PHY VDR register 0xCC4[Rate Control VDR Update] over
2024 * PHY message bus for Owned PHY Lanes.
2025 */
2026 rate_update = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_RATE_UPDATE);
2027 rate_update &= ~LT_PHY_RATE_CONTROL_VDR_UPDATE;
2028 intel_lt_phy_write(encoder, lane_mask: owned_lane_mask, LT_PHY_RATE_UPDATE,
2029 data: rate_update, MB_WRITE_COMMITTED);
2030
2031 /* 16. Poll for PORT_BUF_CTL2 register PHY Pulse Status = 1 for Owned PHY Lanes. */
2032 if (intel_de_wait_for_set_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
2033 mask: lane_phy_pulse_status,
2034 XE3PLPD_RATE_CALIB_DONE_LATENCY_MS))
2035 drm_warn(display->drm, "PHY %c PLL rate not changed\n",
2036 phy_name(phy));
2037
2038 /* 17. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */
2039 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
2040 clear: lane_phy_pulse_status,
2041 set: lane_phy_pulse_status);
2042 } else {
2043 intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), val: crtc_state->port_clock);
2044 }
2045
2046 /*
2047 * 18. Follow the Display Voltage Frequency Switching - Sequence After Frequency Change.
2048 * We handle this step in bxt_set_cdclk()
2049 */
2050 /* 19. Move the PHY powerdown state to Active and program to enable/disable transmitters */
2051 intel_lt_phy_powerdown_change_sequence(encoder, lane_mask: owned_lane_mask,
2052 XELPDP_P0_STATE_ACTIVE);
2053
2054 intel_lt_phy_enable_disable_tx(encoder, crtc_state);
2055 intel_lt_phy_transaction_end(encoder, wakeref);
2056}
2057
2058void intel_lt_phy_pll_disable(struct intel_encoder *encoder)
2059{
2060 struct intel_display *display = to_intel_display(encoder);
2061 enum phy phy = intel_encoder_to_phy(encoder);
2062 enum port port = encoder->port;
2063 intel_wakeref_t wakeref;
2064 u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
2065 u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
2066 ? (XELPDP_LANE_PIPE_RESET(0) |
2067 XELPDP_LANE_PIPE_RESET(1))
2068 : XELPDP_LANE_PIPE_RESET(0);
2069 u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
2070 ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) |
2071 XELPDP_LANE_PHY_CURRENT_STATUS(1))
2072 : XELPDP_LANE_PHY_CURRENT_STATUS(0);
2073 u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
2074 ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
2075 XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
2076 : XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
2077
2078 wakeref = intel_lt_phy_transaction_begin(encoder);
2079
2080 /* 1. Clear PORT_BUF_CTL2 [PHY Pulse Status]. */
2081 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
2082 clear: lane_phy_pulse_status,
2083 set: lane_phy_pulse_status);
2084
2085 /* 2. Set PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> Pipe Reset to 1. */
2086 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), clear: lane_pipe_reset,
2087 set: lane_pipe_reset);
2088
2089 /* 3. Poll for PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> PHY Current Status == 1. */
2090 if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL2(display, port),
2091 mask: lane_phy_current_status,
2092 XE3PLPD_RESET_START_LATENCY_US))
2093 drm_warn(display->drm, "PHY %c failed to reset lane\n",
2094 phy_name(phy));
2095
2096 /* 4. Clear for PHY pulse status on owned PHY lanes. */
2097 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
2098 clear: lane_phy_pulse_status,
2099 set: lane_phy_pulse_status);
2100
2101 /*
2102 * 5. Follow the Display Voltage Frequency Switching -
2103 * Sequence Before Frequency Change. We handle this step in bxt_set_cdclk().
2104 */
2105 /* 6. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */
2106 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
2107 XELPDP_LANE_PCLK_PLL_REQUEST(0), set: 0);
2108
2109 /* 7. Program DDI_CLK_VALFREQ to 0. */
2110 intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), val: 0);
2111
2112 /* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */
2113 if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, port),
2114 XELPDP_LANE_PCLK_PLL_ACK(0),
2115 XE3PLPD_MACCLK_TURNOFF_LATENCY_US))
2116 drm_warn(display->drm, "PHY %c PLL MacCLK ack deassertion timeout\n",
2117 phy_name(phy));
2118
2119 /*
2120 * 9. Follow the Display Voltage Frequency Switching -
2121 * Sequence After Frequency Change. We handle this step in bxt_set_cdclk().
2122 */
2123 /* 10. Program PORT_CLOCK_CTL register to disable and gate clocks. */
2124 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
2125 XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_FORWARD_CLOCK_UNGATE, set: 0);
2126
2127 /* 11. Program PORT_BUF_CTL5[MacCLK Reset_0] = 1 to assert MacCLK reset. */
2128 intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
2129 XE3PLPD_MACCLK_RESET_0, XE3PLPD_MACCLK_RESET_0);
2130
2131 intel_lt_phy_transaction_end(encoder, wakeref);
2132}
2133
2134void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder,
2135 const struct intel_crtc_state *crtc_state)
2136{
2137 struct intel_display *display = to_intel_display(encoder);
2138 const struct intel_ddi_buf_trans *trans;
2139 u8 owned_lane_mask;
2140 intel_wakeref_t wakeref;
2141 int n_entries, ln;
2142 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
2143
2144 if (intel_tc_port_in_tbt_alt_mode(dig_port))
2145 return;
2146
2147 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
2148
2149 wakeref = intel_lt_phy_transaction_begin(encoder);
2150
2151 trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
2152 if (drm_WARN_ON_ONCE(display->drm, !trans)) {
2153 intel_lt_phy_transaction_end(encoder, wakeref);
2154 return;
2155 }
2156
2157 for (ln = 0; ln < crtc_state->lane_count; ln++) {
2158 int level = intel_ddi_level(encoder, crtc_state, lane: ln);
2159 int lane = ln / 2;
2160 int tx = ln % 2;
2161 u8 lane_mask = lane == 0 ? INTEL_LT_PHY_LANE0 : INTEL_LT_PHY_LANE1;
2162
2163 if (!(lane_mask & owned_lane_mask))
2164 continue;
2165
2166 intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL8(tx),
2167 LT_PHY_TX_SWING_LEVEL_MASK | LT_PHY_TX_SWING_MASK,
2168 LT_PHY_TX_SWING_LEVEL(trans->entries[level].lt.txswing_level) |
2169 LT_PHY_TX_SWING(trans->entries[level].lt.txswing),
2170 MB_WRITE_COMMITTED);
2171
2172 intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL2(tx),
2173 LT_PHY_TX_CURSOR_MASK,
2174 LT_PHY_TX_CURSOR(trans->entries[level].lt.pre_cursor),
2175 MB_WRITE_COMMITTED);
2176 intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL3(tx),
2177 LT_PHY_TX_CURSOR_MASK,
2178 LT_PHY_TX_CURSOR(trans->entries[level].lt.main_cursor),
2179 MB_WRITE_COMMITTED);
2180 intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL4(tx),
2181 LT_PHY_TX_CURSOR_MASK,
2182 LT_PHY_TX_CURSOR(trans->entries[level].lt.post_cursor),
2183 MB_WRITE_COMMITTED);
2184 }
2185
2186 intel_lt_phy_transaction_end(encoder, wakeref);
2187}
2188
2189void intel_lt_phy_dump_hw_state(struct intel_display *display,
2190 const struct intel_lt_phy_pll_state *hw_state)
2191{
2192 int i, j;
2193
2194 drm_dbg_kms(display->drm, "lt_phy_pll_hw_state:\n");
2195 for (i = 0; i < 3; i++) {
2196 drm_dbg_kms(display->drm, "config[%d] = 0x%.4x,\n",
2197 i, hw_state->config[i]);
2198 }
2199
2200 for (i = 0; i <= 12; i++)
2201 for (j = 3; j >= 0; j--)
2202 drm_dbg_kms(display->drm, "vdr_data[%d][%d] = 0x%.4x,\n",
2203 i, j, hw_state->data[i][j]);
2204}
2205
2206bool
2207intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a,
2208 const struct intel_lt_phy_pll_state *b)
2209{
2210 if (memcmp(p: &a->config, q: &b->config, size: sizeof(a->config)) != 0)
2211 return false;
2212
2213 if (memcmp(p: &a->data, q: &b->data, size: sizeof(a->data)) != 0)
2214 return false;
2215
2216 return true;
2217}
2218
2219void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
2220 const struct intel_crtc_state *crtc_state,
2221 struct intel_lt_phy_pll_state *pll_state)
2222{
2223 u8 owned_lane_mask;
2224 u8 lane;
2225 intel_wakeref_t wakeref;
2226 int i, j, k;
2227
2228 pll_state->tbt_mode = intel_tc_port_in_tbt_alt_mode(dig_port: enc_to_dig_port(encoder));
2229 if (pll_state->tbt_mode)
2230 return;
2231
2232 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
2233 lane = owned_lane_mask & INTEL_LT_PHY_LANE0 ? : INTEL_LT_PHY_LANE1;
2234 wakeref = intel_lt_phy_transaction_begin(encoder);
2235
2236 pll_state->config[0] = intel_lt_phy_read(encoder, lane_mask: lane, LT_PHY_VDR_0_CONFIG);
2237 pll_state->config[1] = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG);
2238 pll_state->config[2] = intel_lt_phy_read(encoder, lane_mask: lane, LT_PHY_VDR_2_CONFIG);
2239
2240 for (i = 0; i <= 12; i++) {
2241 for (j = 3, k = 0; j >= 0; j--, k++)
2242 pll_state->data[i][k] =
2243 intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0,
2244 LT_PHY_VDR_X_DATAY(i, j));
2245 }
2246
2247 pll_state->clock =
2248 intel_lt_phy_calc_port_clock(encoder, crtc_state);
2249 intel_lt_phy_transaction_end(encoder, wakeref);
2250}
2251
2252void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state,
2253 struct intel_crtc *crtc)
2254{
2255 struct intel_display *display = to_intel_display(state);
2256 struct intel_digital_port *dig_port;
2257 const struct intel_crtc_state *new_crtc_state =
2258 intel_atomic_get_new_crtc_state(state, crtc);
2259 struct intel_encoder *encoder;
2260 struct intel_lt_phy_pll_state pll_hw_state = {};
2261 const struct intel_lt_phy_pll_state *pll_sw_state = &new_crtc_state->dpll_hw_state.ltpll;
2262 int clock;
2263 int i, j;
2264
2265 if (DISPLAY_VER(display) < 35)
2266 return;
2267
2268 if (!new_crtc_state->hw.active)
2269 return;
2270
2271 /* intel_get_crtc_new_encoder() only works for modeset/fastset commits */
2272 if (!intel_crtc_needs_modeset(crtc_state: new_crtc_state) &&
2273 !intel_crtc_needs_fastset(crtc_state: new_crtc_state))
2274 return;
2275
2276 encoder = intel_get_crtc_new_encoder(state, crtc_state: new_crtc_state);
2277 intel_lt_phy_pll_readout_hw_state(encoder, crtc_state: new_crtc_state, pll_state: &pll_hw_state);
2278 clock = intel_lt_phy_calc_port_clock(encoder, crtc_state: new_crtc_state);
2279
2280 dig_port = enc_to_dig_port(encoder);
2281 if (intel_tc_port_in_tbt_alt_mode(dig_port))
2282 return;
2283
2284 INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.clock != clock,
2285 "[CRTC:%d:%s] mismatch in LT PHY: Register CLOCK (expected %d, found %d)",
2286 crtc->base.base.id, crtc->base.name,
2287 pll_sw_state->clock, pll_hw_state.clock);
2288
2289 for (i = 0; i < 3; i++) {
2290 INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.config[i] != pll_sw_state->config[i],
2291 "[CRTC:%d:%s] mismatch in LT PHY PLL CONFIG%d: (expected 0x%04x, found 0x%04x)",
2292 crtc->base.base.id, crtc->base.name, i,
2293 pll_sw_state->config[i], pll_hw_state.config[i]);
2294 }
2295
2296 for (i = 0; i <= 12; i++) {
2297 for (j = 3; j >= 0; j--)
2298 INTEL_DISPLAY_STATE_WARN(display,
2299 pll_hw_state.data[i][j] !=
2300 pll_sw_state->data[i][j],
2301 "[CRTC:%d:%s] mismatch in LT PHY PLL DATA[%d][%d]: (expected 0x%04x, found 0x%04x)",
2302 crtc->base.base.id, crtc->base.name, i, j,
2303 pll_sw_state->data[i][j], pll_hw_state.data[i][j]);
2304 }
2305}
2306
/* PLL enable entry point: dispatch between the TBT and native LT PHY paths. */
void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state)
{
	if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) {
		intel_mtl_tbt_pll_enable(encoder, crtc_state);
		return;
	}

	intel_lt_phy_pll_enable(encoder, crtc_state);
}
2317
/* PLL disable entry point: dispatch between the TBT and native LT PHY paths. */
void intel_xe3plpd_pll_disable(struct intel_encoder *encoder)
{
	if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) {
		intel_mtl_tbt_pll_disable(encoder);
		return;
	}

	intel_lt_phy_pll_disable(encoder);
}
2328

/* source code of linux/drivers/gpu/drm/i915/display/intel_lt_phy.c */