/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_RUNTIME_PM_H__
#define __INTEL_RUNTIME_PM_H__

#include <linux/pm_runtime.h>
#include <linux/types.h>

#include "intel_wakeref.h"

struct device;
struct drm_i915_private;
struct drm_printer;
struct intel_display_rpm_interface;

/*
 * This struct helps track the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, not even register access, so we don't get
 * interrupts nor anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs
 * to either call intel_runtime_pm_get or call intel_display_power_get with
 * the appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only
 * really suspend if we stay with zero refcount for a certain amount of
 * time. The default value is currently very conservative (see
 * intel_runtime_pm_enable), but it can be changed with the standard runtime
 * PM files in sysfs.
 *
 * The irqs_disabled variable becomes true exactly after we disable the
 * IRQs and goes back to false exactly before we re-enable the IRQs. We
 * use this variable to check if someone is trying to enable/disable IRQs
 * while they're supposed to be disabled. This shouldn't happen and we'll
 * print some error messages in case it happens.
 *
 * For more, read Documentation/power/runtime_pm.rst.
 */
struct intel_runtime_pm {
	atomic_t wakeref_count;
	struct device *kdev; /* points to i915->drm.dev */
	bool available;
	bool no_wakeref_tracking;

	/*
	 * Protects access to the lmem userfault list.
	 * If we are outside of the runtime suspend path, access to
	 * @lmem_userfault_list always requires first grabbing the
	 * runtime pm wakeref, to ensure we can't race against runtime
	 * suspend. Once we have that we also need to grab
	 * @lmem_userfault_lock, at which point we have exclusive access.
	 * The runtime suspend path is special since it doesn't really
	 * hold any locks, but instead has exclusive access by virtue of
	 * all other accesses requiring holding the runtime pm wakeref.
	 */
	spinlock_t lmem_userfault_lock;

	/*
	 * List of userfaulted GEM objects whose mmap mappings need to be
	 * released on the runtime suspend path.
	 */
	struct list_head lmem_userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
	struct intel_wakeref_auto userfault_wakeref;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/*
	 * To aid detection of wakeref leaks and general misuse, we
	 * track all wakeref holders. With manual markup (i.e. returning
	 * a cookie to each rpm_get caller which they then supply to their
	 * paired rpm_put) we can remove the corresponding pairs and keep
	 * the array trimmed to active wakerefs.
	 */
	struct ref_tracker_dir debug;
#endif
};

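/*
 * Illustrative sketch (not part of this header): per the rule above, any
 * code path that touches the hardware brackets the access with a get/put
 * pair; the middle line stands in for whatever HW access the caller
 * actually performs.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get(rpm);
 *	... read/write hardware registers ...
 *	intel_runtime_pm_put(rpm, wakeref);
 */
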
#define BITS_PER_WAKEREF	\
	BITS_PER_TYPE(typeof_member(struct intel_runtime_pm, wakeref_count))
#define INTEL_RPM_WAKELOCK_SHIFT	(BITS_PER_WAKEREF / 2)
#define INTEL_RPM_WAKELOCK_BIAS		(1 << INTEL_RPM_WAKELOCK_SHIFT)
#define INTEL_RPM_RAW_WAKEREF_MASK	(INTEL_RPM_WAKELOCK_BIAS - 1)

static inline int
intel_rpm_raw_wakeref_count(int wakeref_count)
{
	return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
}

static inline int
intel_rpm_wakelock_count(int wakeref_count)
{
	return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
}

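/*
 * Worked example of the split counter above (illustrative), assuming a
 * 32-bit wakeref_count: the low 16 bits count raw wakerefs and the high
 * 16 bits count wakelocks, so a value of 0x00020001 decodes as two
 * wakelocks (0x00020001 >> 16 == 2) and one raw wakeref
 * (0x00020001 & 0xffff == 1). disable_rpm_wakeref_asserts() below relies
 * on the same layout: adding INTEL_RPM_WAKELOCK_BIAS + 1 bumps both
 * halves at once.
 */
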
static inline bool
intel_runtime_pm_suspended(struct intel_runtime_pm *rpm)
{
	return pm_runtime_suspended(rpm->kdev);
}

static inline void
assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm)
{
	WARN_ONCE(intel_runtime_pm_suspended(rpm),
		  "Device suspended during HW access\n");
}

static inline void
__assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	assert_rpm_device_not_suspended(rpm);
	WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
		  "RPM raw-wakeref not held\n");
}

static inline void
__assert_rpm_wakelock_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	__assert_rpm_raw_wakeref_held(rpm, wakeref_count);
	WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
		  "RPM wakelock ref not held during HW access\n");
}

static inline void
assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
}

static inline void
assert_rpm_wakelock_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
}

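/*
 * Sketch of typical use (hypothetical helper, not from this header): a
 * low-level accessor can document its contract by asserting the wakelock
 * before touching the hardware.
 *
 *	static u32 my_mmio_read(struct intel_runtime_pm *rpm,
 *				void __iomem *reg)
 *	{
 *		assert_rpm_wakelock_held(rpm);
 *		return readl(reg);
 *	}
 */
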
/**
 * disable_rpm_wakeref_asserts - disable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function disables asserts that check if we hold an RPM wakelock
 * reference, while keeping the device-not-suspended checks still enabled.
 * It's meant to be used only in special circumstances where our rule about
 * the wakelock refcount wrt. the device power state doesn't hold. According
 * to this rule at any point where we access the HW or want to keep the HW in
 * an active state we must hold an RPM wakelock reference acquired via one of
 * the intel_runtime_pm_get() helpers. Currently there are a few special spots
 * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
 * forcewake release timer, and the GPU RPS and hangcheck works. All other
 * users should avoid using this function.
 *
 * Any calls to this function must have a symmetric call to
 * enable_rpm_wakeref_asserts().
 */
static inline void
disable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}

/**
 * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function re-enables the RPM assert checks after disabling them with
 * disable_rpm_wakeref_asserts. It's meant to be used only in special
 * circumstances; otherwise its use should be avoided.
 *
 * Any calls to this function must have a symmetric call to
 * disable_rpm_wakeref_asserts().
 */
static inline void
enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}

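/*
 * Sketch of the symmetric pattern required above (illustrative only): a
 * runtime suspend handler legitimately runs without holding a wakelock
 * reference, so it brackets its HW access with the two helpers.
 *
 *	disable_rpm_wakeref_asserts(rpm);
 *	... save HW state, quiesce the device ...
 *	enable_rpm_wakeref_asserts(rpm);
 */
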
void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm);

intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);

#define with_intel_runtime_pm(rpm, wf) \
	for ((wf) = intel_runtime_pm_get(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)

#define with_intel_runtime_pm_if_in_use(rpm, wf) \
	for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)

#define with_intel_runtime_pm_if_active(rpm, wf) \
	for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)

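/*
 * Example use of the scoped helpers above (illustrative; uncore, reg and
 * val are stand-ins): the body runs only while a wakeref is held, and the
 * put is emitted automatically when the scope is left. With the
 * _if_in_use/_if_active variants the body is skipped entirely when no
 * wakeref could be acquired.
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_intel_runtime_pm(rpm, wakeref)
 *		intel_uncore_write(uncore, reg, val);
 */
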
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
#else
static inline void
intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	intel_runtime_pm_put_unchecked(rpm);
}
#endif
void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p);
#else
static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
						  struct drm_printer *p)
{
}
#endif

extern const struct intel_display_rpm_interface i915_display_rpm_interface;

#endif /* __INTEL_RUNTIME_PM_H__ */
| 233 | |