1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2013 Red Hat
5 * Author: Rob Clark <robdclark@gmail.com>
6 */
7
8#ifndef __MSM_DRV_H__
9#define __MSM_DRV_H__
10
11#include <linux/kernel.h>
12#include <linux/clk.h>
13#include <linux/cpufreq.h>
14#include <linux/devfreq.h>
15#include <linux/module.h>
16#include <linux/component.h>
17#include <linux/platform_device.h>
18#include <linux/pm.h>
19#include <linux/pm_runtime.h>
20#include <linux/slab.h>
21#include <linux/list.h>
22#include <linux/iommu.h>
23#include <linux/types.h>
24#include <linux/of_graph.h>
25#include <linux/of_device.h>
26#include <linux/sizes.h>
27#include <linux/kthread.h>
28
29#include <drm/drm_atomic.h>
30#include <drm/drm_atomic_helper.h>
31#include <drm/drm_print.h>
32#include <drm/drm_probe_helper.h>
33#include <drm/display/drm_dsc.h>
34#include <drm/msm_drm.h>
35#include <drm/drm_gem.h>
36
37extern struct fault_attr fail_gem_alloc;
38extern struct fault_attr fail_gem_iova;
39
40struct drm_fb_helper;
41struct drm_fb_helper_surface_size;
42
43struct msm_kms;
44struct msm_gpu;
45struct msm_mmu;
46struct msm_mdss;
47struct msm_rd_state;
48struct msm_perf_state;
49struct msm_gem_submit;
50struct msm_fence_context;
51struct msm_disp_state;
52
53#define MAX_CRTCS 8
54
55#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
56
57enum msm_dp_controller {
58 MSM_DP_CONTROLLER_0,
59 MSM_DP_CONTROLLER_1,
60 MSM_DP_CONTROLLER_2,
61 MSM_DP_CONTROLLER_3,
62 MSM_DP_CONTROLLER_COUNT,
63};
64
65enum msm_dsi_controller {
66 MSM_DSI_CONTROLLER_0,
67 MSM_DSI_CONTROLLER_1,
68 MSM_DSI_CONTROLLER_COUNT,
69};
70
71#define MSM_GPU_MAX_RINGS 4
72
struct msm_drm_private {

	struct drm_device *dev;

	struct msm_kms *kms;
	int (*kms_init)(struct drm_device *dev);

	/* subordinate devices, if present: */
	struct platform_device *gpu_pdev;

	/* when we have more than one 'msm_gpu' these need to be an array: */
	struct msm_gpu *gpu;

	/* gpu is only set on open(), but we need this info earlier */
	bool is_a2xx;
	bool has_cached_coherent;

	struct msm_rd_state *rd; /* debugfs to dump all submits */
	struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */
	struct msm_perf_state *perf;

	/**
	 * total_mem: Total/global amount of memory backing GEM objects.
	 */
	atomic64_t total_mem;

	/**
	 * List of all GEM objects (mainly for debugfs, protected by obj_lock;
	 * acquire obj_lock before any per GEM object lock)
	 */
	struct list_head objects;
	struct mutex obj_lock;

	/**
	 * lru:
	 *
	 * The various LRU's that a GEM object is in at various stages of
	 * its lifetime. Objects start out in the unbacked LRU. When
	 * pinned (for scanout or permanently mapped GPU buffers, like
	 * ringbuffer, memptr, fw, etc) it moves to the pinned LRU. When
	 * unpinned, it moves into willneed or dontneed LRU depending on
	 * madvise state. When backing pages are evicted (willneed) or
	 * purged (dontneed) it moves back into the unbacked LRU.
	 *
	 * The dontneed LRU is considered by the shrinker for objects
	 * that are candidate for purging, and the willneed LRU is
	 * considered for objects that could be evicted.
	 */
	struct {
		/**
		 * unbacked:
		 *
		 * The LRU for GEM objects without backing pages allocated.
		 * This mostly exists so that objects are always in one
		 * LRU.
		 */
		struct drm_gem_lru unbacked;

		/**
		 * pinned:
		 *
		 * The LRU for pinned GEM objects
		 */
		struct drm_gem_lru pinned;

		/**
		 * willneed:
		 *
		 * The LRU for unpinned GEM objects which are in madvise
		 * WILLNEED state (ie. can be evicted)
		 */
		struct drm_gem_lru willneed;

		/**
		 * dontneed:
		 *
		 * The LRU for unpinned GEM objects which are in madvise
		 * DONTNEED state (ie. can be purged)
		 */
		struct drm_gem_lru dontneed;

		/**
		 * lock:
		 *
		 * Protects manipulation of all of the LRUs.
		 */
		struct mutex lock;
	} lru;

	struct notifier_block vmap_notifier;
	struct shrinker *shrinker;

	/**
	 * hangcheck_period: For hang detection, in ms
	 *
	 * Note that in practice, a submit/job will get at least two hangcheck
	 * periods, due to checking for progress being implemented as simply
	 * "have the CP position registers changed since last time?"
	 */
	unsigned int hangcheck_period;

	/** gpu_devfreq_config: Devfreq tuning config for the GPU. */
	struct devfreq_simple_ondemand_data gpu_devfreq_config;

	/**
	 * gpu_clamp_to_idle: Enable clamping to idle freq when inactive
	 */
	bool gpu_clamp_to_idle;

	/**
	 * disable_err_irq:
	 *
	 * Disable handling of GPU hw error interrupts, to force fallback to
	 * sw hangcheck timer. Written (via debugfs) by igt tests to test
	 * the sw hangcheck mechanism.
	 */
	bool disable_err_irq;

	/**
	 * @fault_stall_lock:
	 *
	 * Serialize changes to stall-on-fault state.
	 */
	spinlock_t fault_stall_lock;

	/**
	 * @stall_reenable_time:
	 *
	 * If stall_enabled is false, when to reenable stall-on-fault.
	 * Protected by @fault_stall_lock.
	 */
	ktime_t stall_reenable_time;

	/**
	 * @stall_enabled:
	 *
	 * Whether stall-on-fault is currently enabled. Protected by
	 * @fault_stall_lock.
	 */
	bool stall_enabled;
};
214
215const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
216
217struct msm_pending_timer;
218
219int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
220 struct msm_kms *kms, int crtc_idx);
221void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer);
222void msm_atomic_commit_tail(struct drm_atomic_state *state);
223int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
224struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
225
226int msm_crtc_enable_vblank(struct drm_crtc *crtc);
227void msm_crtc_disable_vblank(struct drm_crtc *crtc);
228
229int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
230void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
231
232struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev, struct device *mdss_dev);
233bool msm_use_mmu(struct drm_device *dev);
234
235int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
236 struct drm_file *file);
237int msm_ioctl_vm_bind(struct drm_device *dev, void *data,
238 struct drm_file *file);
239
240#ifdef CONFIG_DEBUG_FS
241unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);
242#endif
243
244int msm_gem_shrinker_init(struct drm_device *dev);
245void msm_gem_shrinker_cleanup(struct drm_device *dev);
246
247struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
248int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
249void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
250struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev, struct dma_buf *buf);
251struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
252 struct dma_buf_attachment *attach, struct sg_table *sg);
253struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags);
254int msm_gem_prime_pin(struct drm_gem_object *obj);
255void msm_gem_prime_unpin(struct drm_gem_object *obj);
256
257int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb);
258void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb);
259uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane);
260struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
261const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
262struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
263 struct drm_file *file, const struct drm_format_info *info,
264 const struct drm_mode_fb_cmd2 *mode_cmd);
265struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
266 int w, int h, int p, uint32_t format);
267
268#ifdef CONFIG_DRM_MSM_KMS_FBDEV
269int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
270 struct drm_fb_helper_surface_size *sizes);
271#define MSM_FBDEV_DRIVER_OPS \
272 .fbdev_probe = msm_fbdev_driver_fbdev_probe
273#else
274#define MSM_FBDEV_DRIVER_OPS \
275 .fbdev_probe = NULL
276#endif
277
278struct hdmi;
279#ifdef CONFIG_DRM_MSM_HDMI
280int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
281 struct drm_encoder *encoder);
282void __init msm_hdmi_register(void);
283void __exit msm_hdmi_unregister(void);
284#else
/* Stub implementations used when HDMI support is compiled out: */
static inline int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
		struct drm_encoder *encoder)
{
	return -EINVAL;
}
static inline void __init msm_hdmi_register(void) {}
static inline void __exit msm_hdmi_unregister(void) {}
292#endif
293
294struct msm_dsi;
295#ifdef CONFIG_DRM_MSM_DSI
296int dsi_dev_attach(struct platform_device *pdev);
297void dsi_dev_detach(struct platform_device *pdev);
298void __init msm_dsi_register(void);
299void __exit msm_dsi_unregister(void);
300int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
301 struct drm_encoder *encoder);
302void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi);
303bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
304bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
305bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
306bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi);
307struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
308const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi);
309#else
/* Stub implementations used when DSI support is compiled out: */
static inline void __init msm_dsi_register(void)
{
}
static inline void __exit msm_dsi_unregister(void)
{
}
static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
				       struct drm_device *dev,
				       struct drm_encoder *encoder)
{
	return -EINVAL;
}
static inline void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
{
}
static inline bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi)
{
	return false;
}

static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
	return NULL;
}

static inline const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi)
{
	return NULL;
}
351#endif
352
353struct msm_dp;
354#ifdef CONFIG_DRM_MSM_DP
355int __init msm_dp_register(void);
356void __exit msm_dp_unregister(void);
357int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
358 struct drm_encoder *encoder, bool yuv_supported);
359void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
360bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display,
361 const struct drm_display_mode *mode);
362bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display,
363 const struct drm_display_mode *mode);
364bool msm_dp_wide_bus_available(const struct msm_dp *dp_display);
365
366#else
/* Stub implementations used when DisplayPort support is compiled out: */
static inline int __init msm_dp_register(void)
{
	return -EINVAL;
}
static inline void __exit msm_dp_unregister(void)
{
}
static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
				      struct drm_device *dev,
				      struct drm_encoder *encoder,
				      bool yuv_supported)
{
	return -EINVAL;
}

static inline void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display)
{
}

static inline bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display,
					     const struct drm_display_mode *mode)
{
	return false;
}

static inline bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display,
					     const struct drm_display_mode *mode)
{
	return false;
}

static inline bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
	return false;
}
402
403#endif
404
/*
 * Per-generation display driver register/unregister hooks; each pair
 * collapses to empty stubs when the corresponding Kconfig option is
 * disabled.
 */
#ifdef CONFIG_DRM_MSM_MDP4
void msm_mdp4_register(void);
void msm_mdp4_unregister(void);
#else
static inline void msm_mdp4_register(void) {}
static inline void msm_mdp4_unregister(void) {}
#endif

#ifdef CONFIG_DRM_MSM_MDP5
void msm_mdp_register(void);
void msm_mdp_unregister(void);
#else
static inline void msm_mdp_register(void) {}
static inline void msm_mdp_unregister(void) {}
#endif

#ifdef CONFIG_DRM_MSM_DPU
void msm_dpu_register(void);
void msm_dpu_unregister(void);
#else
static inline void msm_dpu_register(void) {}
static inline void msm_dpu_unregister(void) {}
#endif

#ifdef CONFIG_DRM_MSM_MDSS
void msm_mdss_register(void);
void msm_mdss_unregister(void);
#else
static inline void msm_mdss_register(void) {}
static inline void msm_mdss_unregister(void) {}
#endif
436
437#ifdef CONFIG_DEBUG_FS
438void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
439int msm_debugfs_late_init(struct drm_device *dev);
440int msm_rd_debugfs_init(struct drm_minor *minor);
441void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
442__printf(3, 4)
443void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
444 const char *fmt, ...);
445int msm_perf_debugfs_init(struct drm_minor *minor);
446void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
447#else
/* Stub implementations used when debugfs support is compiled out: */
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
__printf(3, 4)
static inline void msm_rd_dump_submit(struct msm_rd_state *rd,
				      struct msm_gem_submit *submit,
				      const char *fmt, ...) {}
static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
455#endif
456
457struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
458
459struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
460 const char *name);
461void __iomem *msm_ioremap(struct platform_device *pdev, const char *name);
462void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
463 phys_addr_t *size);
464void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name);
465void __iomem *msm_ioremap_mdss(struct platform_device *mdss_pdev,
466 struct platform_device *dev,
467 const char *name);
468
469struct icc_path *msm_icc_get(struct device *dev, const char *name);
470
471static inline void msm_rmw(void __iomem *addr, u32 mask, u32 or)
472{
473 u32 val = readl(addr);
474
475 val &= ~mask;
476 writel(val: val | or, addr);
477}
478
/**
 * struct msm_hrtimer_work - a helper to combine an hrtimer with kthread_work
 *
 * @timer: hrtimer to control when the kthread work is triggered
 * @work: the kthread work
 * @worker: the kthread worker the work will be scheduled on
 *
 * Initialize with msm_hrtimer_work_init() and schedule with
 * msm_hrtimer_queue_work().
 */
struct msm_hrtimer_work {
	struct hrtimer timer;
	struct kthread_work work;
	struct kthread_worker *worker;
};
491
492void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
493 ktime_t wakeup_time,
494 enum hrtimer_mode mode);
495void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
496 struct kthread_worker *worker,
497 kthread_work_func_t fn,
498 clockid_t clock_id,
499 enum hrtimer_mode mode);
500
501/* Helper for returning a UABI error with optional logging which can make
502 * it easier for userspace to understand what it is doing wrong.
503 */
504#define UERR(err, drm, fmt, ...) \
505 ({ DRM_DEV_DEBUG_DRIVER((drm)->dev, fmt, ##__VA_ARGS__); -(err); })
506
507#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
508#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
509
/*
 * align_pitch - compute framebuffer pitch in bytes
 * @width: buffer width in pixels
 * @bpp: bits per pixel
 *
 * Adreno requires the pitch to be aligned to 32 pixels.
 */
static inline int align_pitch(int width, int bpp)
{
	int bytes_per_pixel = (bpp + 7) / 8;

	return ALIGN(width, 32) * bytes_per_pixel;
}
516
517/* for the generated headers: */
518#define INVALID_IDX(idx) ({BUG(); 0;})
519#define fui(x) ({BUG(); 0;})
520#define _mesa_float_to_half(x) ({BUG(); 0;})
521
522
523#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
524
525/* for conditionally setting boolean flag(s): */
526#define COND(bool, val) ((bool) ? (val) : 0)
527
528static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
529{
530 ktime_t now = ktime_get();
531
532 if (ktime_compare(cmp1: *timeout, cmp2: now) <= 0)
533 return 0;
534
535 ktime_t rem = ktime_sub(*timeout, now);
536 s64 remaining_jiffies = ktime_divns(kt: rem, NSEC_PER_SEC / HZ);
537 return clamp(remaining_jiffies, 1LL, (s64)INT_MAX);
538}
539
540/* Driver helpers */
541
542extern const struct component_master_ops msm_drm_ops;
543
544int msm_kms_pm_prepare(struct device *dev);
545void msm_kms_pm_complete(struct device *dev);
546
547int msm_gpu_probe(struct platform_device *pdev,
548 const struct component_ops *ops);
549void msm_gpu_remove(struct platform_device *pdev,
550 const struct component_ops *ops);
551int msm_drv_probe(struct device *dev,
552 int (*kms_init)(struct drm_device *dev),
553 struct msm_kms *kms);
554void msm_kms_shutdown(struct platform_device *pdev);
555
556bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver);
557
558bool msm_gpu_no_components(void);
559
560#endif /* __MSM_DRV_H__ */
561

/* source: linux/drivers/gpu/drm/msm/msm_drv.h */