1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2023 Intel Corporation
4 */
5
6#include "xe_display.h"
7#include "regs/xe_irq_regs.h"
8
9#include <linux/fb.h>
10
11#include <drm/drm_client.h>
12#include <drm/drm_client_event.h>
13#include <drm/drm_drv.h>
14#include <drm/drm_managed.h>
15#include <drm/drm_probe_helper.h>
16#include <drm/intel/display_member.h>
17#include <drm/intel/display_parent_interface.h>
18#include <uapi/drm/xe_drm.h>
19
20#include "soc/intel_dram.h"
21#include "intel_acpi.h"
22#include "intel_audio.h"
23#include "intel_bw.h"
24#include "intel_display.h"
25#include "intel_display_device.h"
26#include "intel_display_driver.h"
27#include "intel_display_irq.h"
28#include "intel_display_types.h"
29#include "intel_dmc.h"
30#include "intel_dmc_wl.h"
31#include "intel_dp.h"
32#include "intel_encoder.h"
33#include "intel_fbdev.h"
34#include "intel_hdcp.h"
35#include "intel_hotplug.h"
36#include "intel_opregion.h"
37#include "skl_watermark.h"
38#include "xe_display_rpm.h"
39#include "xe_module.h"
40
41/* Ensure drm and display members are placed properly. */
42INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct xe_device, drm, display);
43
44/* Xe device functions */
45
46/**
47 * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
48 * early on
49 * @pdev: PCI device
50 *
51 * Note: This is called before xe or display device creation.
52 *
53 * Returns: true if probe needs to be deferred, false otherwise
54 */
55bool xe_display_driver_probe_defer(struct pci_dev *pdev)
56{
57 if (!xe_modparam.probe_display)
58 return 0;
59
60 return intel_display_driver_probe_defer(pdev);
61}
62
63/**
64 * xe_display_driver_set_hooks - Add driver flags and hooks for display
65 * @driver: DRM device driver
66 *
67 * Set features and function hooks in @driver that are needed for driving the
68 * display IP. This sets the driver's capability of driving display, regardless
69 * if the device has it enabled
70 *
71 * Note: This is called before xe or display device creation.
72 */
73void xe_display_driver_set_hooks(struct drm_driver *driver)
74{
75 if (!xe_modparam.probe_display)
76 return;
77
78#ifdef CONFIG_DRM_FBDEV_EMULATION
79 driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
80#endif
81
82 driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
83}
84
85static void unset_display_features(struct xe_device *xe)
86{
87 xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
88}
89
/*
 * Early display teardown, registered via devm in xe_display_init_early().
 * Undoes the probe_noirq/probe_nogem steps in reverse order.
 */
static void xe_display_fini_early(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/* Stop any outstanding hotplug work before tearing the driver down. */
	intel_hpd_cancel_work(display);
	intel_display_driver_remove_nogem(display);
	intel_display_driver_remove_noirq(display);
	intel_opregion_cleanup(display);
	intel_power_domains_cleanup(display);
}
104
/**
 * xe_display_init_early - Initialize the display IP before interrupts
 * @xe: XE device instance
 *
 * Performs the pre-IRQ portion of display bring-up (opregion, DRAM info,
 * power domains, noirq/nogem probe stages) and registers a devm action so
 * the matching teardown runs automatically on driver removal.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int xe_display_init_early(struct xe_device *xe)
{
	struct intel_display *display = xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	/* Fake uncore lock */
	spin_lock_init(&xe->uncore.lock);

	intel_display_driver_early_probe(display);

	/* Early display init.. */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	err = intel_dram_detect(xe);
	if (err)
		goto err_opregion;

	intel_bw_init_hw(display);

	intel_display_device_info_runtime_init(display);

	err = intel_display_driver_probe_noirq(display);
	if (err)
		goto err_opregion;

	err = intel_display_driver_probe_nogem(display);
	if (err)
		goto err_noirq;

	/* On success, xe_display_fini_early() handles all further unwinding. */
	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);
err_noirq:
	intel_display_driver_remove_noirq(display);
	intel_power_domains_cleanup(display);
err_opregion:
	intel_opregion_cleanup(display);
	return err;
}
149
/*
 * Late display teardown, registered via devm in xe_display_init().
 * Note: no probe_display check here; the action is only registered when
 * display probing succeeded.
 */
static void xe_display_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = xe->display;

	intel_hpd_poll_fini(display);
	intel_hdcp_component_fini(display);
	intel_audio_deinit(display);
	intel_display_driver_remove(display);
}
160
161int xe_display_init(struct xe_device *xe)
162{
163 struct intel_display *display = xe->display;
164 int err;
165
166 if (!xe->info.probe_display)
167 return 0;
168
169 err = intel_display_driver_probe(display);
170 if (err)
171 return err;
172
173 return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
174}
175
176void xe_display_register(struct xe_device *xe)
177{
178 struct intel_display *display = xe->display;
179
180 if (!xe->info.probe_display)
181 return;
182
183 intel_display_driver_register(display);
184 intel_power_domains_enable(display);
185}
186
187void xe_display_unregister(struct xe_device *xe)
188{
189 struct intel_display *display = xe->display;
190
191 if (!xe->info.probe_display)
192 return;
193
194 intel_power_domains_disable(display);
195 intel_display_driver_unregister(display);
196}
197
198/* IRQ-related functions */
199
200void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
201{
202 struct intel_display *display = xe->display;
203
204 if (!xe->info.probe_display)
205 return;
206
207 if (master_ctl & DISPLAY_IRQ)
208 gen11_display_irq_handler(display);
209}
210
211void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
212{
213 struct intel_display *display = xe->display;
214
215 if (!xe->info.probe_display)
216 return;
217
218 if (gu_misc_iir & GU_MISC_GSE)
219 intel_opregion_asle_intr(display);
220}
221
222void xe_display_irq_reset(struct xe_device *xe)
223{
224 struct intel_display *display = xe->display;
225
226 if (!xe->info.probe_display)
227 return;
228
229 gen11_display_irq_reset(display);
230}
231
232void xe_display_irq_postinstall(struct xe_device *xe)
233{
234 struct intel_display *display = xe->display;
235
236 if (!xe->info.probe_display)
237 return;
238
239 gen11_de_irq_postinstall(display);
240}
241
242static bool suspend_to_idle(void)
243{
244#if IS_ENABLED(CONFIG_ACPI_SLEEP)
245 if (acpi_target_system_state() < ACPI_STATE_S3)
246 return true;
247#endif
248 return false;
249}
250
251static void xe_display_flush_cleanup_work(struct xe_device *xe)
252{
253 struct intel_crtc *crtc;
254
255 for_each_intel_crtc(&xe->drm, crtc) {
256 struct drm_crtc_commit *commit;
257
258 spin_lock(lock: &crtc->base.commit_lock);
259 commit = list_first_entry_or_null(&crtc->base.commit_list,
260 struct drm_crtc_commit, commit_entry);
261 if (commit)
262 drm_crtc_commit_get(commit);
263 spin_unlock(lock: &crtc->base.commit_lock);
264
265 if (commit) {
266 wait_for_completion(&commit->cleanup_done);
267 drm_crtc_commit_put(commit);
268 }
269 }
270}
271
/*
 * Prepare the display for entering D3cold at runtime suspend:
 * quiesce commits, put the opregion and DMC to sleep and switch HPD to
 * polling (interrupts are unavailable in D3cold).
 */
static void xe_display_enable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);

	xe_display_flush_cleanup_work(xe);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);

	if (intel_display_device_present(display))
		intel_hpd_poll_enable(display);
}
294
295static void xe_display_disable_d3cold(struct xe_device *xe)
296{
297 struct intel_display *display = xe->display;
298
299 if (!xe->info.probe_display)
300 return;
301
302 intel_dmc_resume(display);
303
304 if (intel_display_device_present(display))
305 drm_mode_config_reset(dev: &xe->drm);
306
307 intel_display_driver_init_hw(display);
308
309 intel_hpd_init(display);
310
311 if (intel_display_device_present(display))
312 intel_hpd_poll_disable(display);
313
314 intel_opregion_resume(display);
315
316 intel_power_domains_enable(display);
317}
318
319void xe_display_pm_suspend(struct xe_device *xe)
320{
321 struct intel_display *display = xe->display;
322 bool s2idle = suspend_to_idle();
323
324 if (!xe->info.probe_display)
325 return;
326
327 /*
328 * We do a lot of poking in a lot of registers, make sure they work
329 * properly.
330 */
331 intel_power_domains_disable(display);
332 drm_client_dev_suspend(dev: &xe->drm);
333
334 if (intel_display_device_present(display)) {
335 drm_kms_helper_poll_disable(dev: &xe->drm);
336 intel_display_driver_disable_user_access(display);
337 intel_display_driver_suspend(display);
338 }
339
340 xe_display_flush_cleanup_work(xe);
341
342 intel_encoder_block_all_hpds(display);
343
344 intel_hpd_cancel_work(display);
345
346 if (intel_display_device_present(display)) {
347 intel_display_driver_suspend_access(display);
348 intel_encoder_suspend_all(display);
349 }
350
351 intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
352
353 intel_dmc_suspend(display);
354}
355
356void xe_display_pm_shutdown(struct xe_device *xe)
357{
358 struct intel_display *display = xe->display;
359
360 if (!xe->info.probe_display)
361 return;
362
363 intel_power_domains_disable(display);
364 drm_client_dev_suspend(dev: &xe->drm);
365
366 if (intel_display_device_present(display)) {
367 drm_kms_helper_poll_disable(dev: &xe->drm);
368 intel_display_driver_disable_user_access(display);
369 intel_display_driver_suspend(display);
370 }
371
372 xe_display_flush_cleanup_work(xe);
373 intel_dp_mst_suspend(display);
374 intel_encoder_block_all_hpds(display);
375 intel_hpd_cancel_work(display);
376
377 if (intel_display_device_present(display))
378 intel_display_driver_suspend_access(display);
379
380 intel_encoder_suspend_all(display);
381 intel_encoder_shutdown_all(display);
382
383 intel_opregion_suspend(display, PCI_D3cold);
384
385 intel_dmc_suspend(display);
386}
387
388void xe_display_pm_runtime_suspend(struct xe_device *xe)
389{
390 struct intel_display *display = xe->display;
391
392 if (!xe->info.probe_display)
393 return;
394
395 if (xe->d3cold.allowed) {
396 xe_display_enable_d3cold(xe);
397 return;
398 }
399
400 intel_hpd_poll_enable(display);
401}
402
403void xe_display_pm_suspend_late(struct xe_device *xe)
404{
405 struct intel_display *display = xe->display;
406 bool s2idle = suspend_to_idle();
407
408 if (!xe->info.probe_display)
409 return;
410
411 intel_display_power_suspend_late(display, s2idle);
412}
413
/*
 * Late runtime-suspend step: run the full late suspend only when D3cold is
 * allowed; always flush the DMC wakelock release work.
 */
void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);

	/*
	 * If xe_display_pm_suspend_late() is not called, it is likely
	 * that we will be on dynamic DC states with DMC wakelock enabled. We
	 * need to flush the release work in that case.
	 */
	intel_dmc_wl_flush_release_work(display);
}
431
432void xe_display_pm_shutdown_late(struct xe_device *xe)
433{
434 struct intel_display *display = xe->display;
435
436 if (!xe->info.probe_display)
437 return;
438
439 /*
440 * The only requirement is to reboot with display DC states disabled,
441 * for now leaving all display power wells in the INIT power domain
442 * enabled.
443 */
444 intel_power_domains_driver_remove(display);
445}
446
447void xe_display_pm_resume_early(struct xe_device *xe)
448{
449 struct intel_display *display = xe->display;
450
451 if (!xe->info.probe_display)
452 return;
453
454 intel_display_power_resume_early(display);
455}
456
457void xe_display_pm_resume(struct xe_device *xe)
458{
459 struct intel_display *display = xe->display;
460
461 if (!xe->info.probe_display)
462 return;
463
464 intel_dmc_resume(display);
465
466 if (intel_display_device_present(display))
467 drm_mode_config_reset(dev: &xe->drm);
468
469 intel_display_driver_init_hw(display);
470
471 if (intel_display_device_present(display))
472 intel_display_driver_resume_access(display);
473
474 intel_hpd_init(display);
475
476 intel_encoder_unblock_all_hpds(display);
477
478 if (intel_display_device_present(display)) {
479 intel_display_driver_resume(display);
480 drm_kms_helper_poll_enable(dev: &xe->drm);
481 intel_display_driver_enable_user_access(display);
482 }
483
484 if (intel_display_device_present(display))
485 intel_hpd_poll_disable(display);
486
487 intel_opregion_resume(display);
488
489 drm_client_dev_resume(dev: &xe->drm);
490
491 intel_power_domains_enable(display);
492}
493
494void xe_display_pm_runtime_resume(struct xe_device *xe)
495{
496 struct intel_display *display = xe->display;
497
498 if (!xe->info.probe_display)
499 return;
500
501 if (xe->d3cold.allowed) {
502 xe_display_disable_d3cold(xe);
503 return;
504 }
505
506 intel_hpd_init(display);
507 intel_hpd_poll_disable(display);
508 skl_watermark_ipc_update(display);
509}
510
511
/* drmm action: destroy the display device created in xe_display_probe(). */
static void display_device_remove(struct drm_device *dev, void *arg)
{
	intel_display_device_remove(arg);
}
518
/* Callbacks the display core uses to reach back into the parent xe driver. */
static const struct intel_display_parent_interface parent = {
	.rpm = &xe_display_rpm_interface,
};
522
523/**
524 * xe_display_probe - probe display and create display struct
525 * @xe: XE device instance
526 *
527 * Initialize all fields used by the display part.
528 *
529 * TODO: once everything can be inside a single struct, make the struct opaque
530 * to the rest of xe and return it to be xe->display.
531 *
532 * Returns: 0 on success
533 */
534int xe_display_probe(struct xe_device *xe)
535{
536 struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
537 struct intel_display *display;
538 int err;
539
540 if (!xe->info.probe_display)
541 goto no_display;
542
543 display = intel_display_device_probe(pdev, &parent);
544 if (IS_ERR(ptr: display))
545 return PTR_ERR(ptr: display);
546
547 err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
548 if (err)
549 return err;
550
551 xe->display = display;
552
553 if (intel_display_device_present(display))
554 return 0;
555
556no_display:
557 xe->info.probe_display = false;
558 unset_display_features(xe);
559 return 0;
560}
561

/* source code of linux/drivers/gpu/drm/xe/display/xe_display.c */