/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

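/*
 * Type of CPU idleness, as used for example by the load balancer to
 * distinguish balancing on a busy CPU, an idle CPU, and a CPU that is
 * about to go idle (newly idle).
 */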
enum cpu_idle_type {
	__CPU_NOT_IDLE = 0,
	CPU_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

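/* Kick @cpu out of its idle loop if it is currently running the idle task. */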
extern void wake_up_if_idle(int cpu);

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
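/*
 * Rough usage sketch (simplified, not the exact idle-loop code): the
 * idle task advertises that it polls on TIF_NEED_RESCHED, so that a
 * remote resched_curr() can skip the IPI, and re-checks the flag
 * before actually spinning:
 *
 *	if (!current_set_polling_and_test()) {
 *		while (!need_resched())
 *			cpu_relax();
 *	}
 *	current_clr_polling();
 */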
#ifdef TIF_POLLING_NRFLAG

#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
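/*
 * The instrumented bitops header is in use: call the arch_ primitives
 * directly so these helpers stay free of KASAN/KCSAN instrumentation
 * and remain usable from the noinstr idle path.
 */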

static __always_inline void __current_set_polling(void)
{
	arch_set_bit(TIF_POLLING_NRFLAG,
		     (unsigned long *)(&current_thread_info()->flags));
}

static __always_inline void __current_clr_polling(void)
{
	arch_clear_bit(TIF_POLLING_NRFLAG,
		       (unsigned long *)(&current_thread_info()->flags));
}

#else
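/*
 * Plain bitops: without the instrumented wrappers, set_bit()/clear_bit()
 * are already the arch primitives, so they can be used directly.
 */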

static __always_inline void __current_set_polling(void)
{
	set_bit(TIF_POLLING_NRFLAG,
		(unsigned long *)(&current_thread_info()->flags));
}

static __always_inline void __current_clr_polling(void)
{
	clear_bit(TIF_POLLING_NRFLAG,
		  (unsigned long *)(&current_thread_info()->flags));
}

#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */

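/*
 * Set the polling flag, then check whether a reschedule is already
 * pending; returns true if so, in which case the caller should not
 * start polling and should reschedule instead.
 */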
static __always_inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

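/*
 * Clear the polling flag, then check whether a reschedule became
 * pending in the meantime; returns true if so.
 */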
static __always_inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

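/*
 * Unconditionally leave polling mode; any NEED_RESCHED that slipped in
 * while polling is folded into the preempt count below so it is not
 * lost.
 */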
static __always_inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb__after_atomic(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}

#else
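/*
 * No TIF_POLLING_NRFLAG on this architecture: the idle loop never polls
 * on NEED_RESCHED and remote CPUs always send a reschedule IPI, so there
 * is no polling state to maintain.
 */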
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}

static __always_inline void current_clr_polling(void)
{
	__current_clr_polling();

	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}
#endif /* TIF_POLLING_NRFLAG */

#endif /* _LINUX_SCHED_IDLE_H */