/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H

#include <linux/sched/signal.h>

/*
 * cputime accounting APIs:
 */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern bool task_cputime(struct task_struct *t,
			 u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline bool task_cputime(struct task_struct *t,
				u64 *utime, u64 *stime)
{
	*utime = t->utime;
	*stime = t->stime;
	return false;
}

static inline u64 task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif
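
/*
 * Illustrative usage sketch (not part of this header's API): a caller that
 * wants a task's user/system/guest time in nanoseconds could do something
 * like the following. The function name report_task_times() is hypothetical.
 *
 *	static void report_task_times(struct task_struct *t)
 *	{
 *		u64 utime, stime;
 *
 *		task_cputime(t, &utime, &stime);
 *		pr_info("%s: utime=%llu stime=%llu gtime=%llu\n",
 *			t->comm, utime, stime, task_gtime(t));
 *	}
 */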

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	*utimescaled = t->utimescaled;
	*stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	task_cputime(t, utimescaled, stimescaled);
}
#endif

extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
			   u64 *ut, u64 *st);
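
/*
 * Illustrative usage sketch: the *_adjusted() helpers hand back utime/stime
 * smoothed against the task's (or thread group's) sum_exec_runtime, which is
 * the form procfs-style reporting usually wants. A hypothetical caller
 * (show_adjusted_times() is not a real kernel function) might look like:
 *
 *	static void show_adjusted_times(struct task_struct *p)
 *	{
 *		u64 ut, st;
 *
 *		task_cputime_adjusted(p, &ut, &st);
 *		pr_info("adjusted: utime=%llu stime=%llu\n", ut, st);
 *	}
 */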

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples);
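
/*
 * Illustrative usage sketch: thread_group_cputime() fills a struct
 * task_cputime with the accumulated times of every thread in @tsk's group.
 * A hypothetical helper (group_runtime_ns() is not a real kernel function)
 * could use it as:
 *
 *	static u64 group_runtime_ns(struct task_struct *tsk)
 *	{
 *		struct task_cputime times;
 *
 *		thread_group_cputime(tsk, &times);
 *		return times.sum_exec_runtime;
 *	}
 */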

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * get_running_cputimer - return &tsk->signal->cputimer if cputimers are active
 *
 * @tsk:	Pointer to target task.
 */
#ifdef CONFIG_POSIX_TIMERS
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/*
	 * Check whether posix CPU timers are active. If not, the thread
	 * group accounting is not active either. Lockless check.
	 */
	if (!READ_ONCE(tsk->signal->posix_cputimers.timers_active))
		return NULL;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account to the signal struct further
	 * cputime consumed by that task, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() in any running thread group timer.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return NULL;

	return cputimer;
}
#else
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
	return NULL;
}
#endif

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   u64 cputime)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     u64 cputime)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
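
/*
 * Illustrative usage sketch: the account_group_*() helpers above are meant to
 * be fed the time just consumed by the current task from the tick/accounting
 * path. A hypothetical caller (charge_user_time() is not a real kernel
 * function) could look like:
 *
 *	static void charge_user_time(struct task_struct *curr, u64 delta_ns)
 *	{
 *		curr->utime += delta_ns;
 *		account_group_user_time(curr, delta_ns);
 *	}
 */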

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}
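
/*
 * Illustrative usage sketch: prev_cputime_init() should run once on a freshly
 * allocated object that embeds a struct prev_cputime, before cputime_adjust()
 * can ever see it. A hypothetical container (struct my_accounting is made up
 * for this example) would be initialized like:
 *
 *	static void init_accounting_state(struct my_accounting *acct)
 *	{
 *		prev_cputime_init(&acct->prev_cputime);
 *	}
 */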

extern unsigned long long
task_sched_runtime(struct task_struct *task);
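
/*
 * Illustrative usage sketch: task_sched_runtime() returns the task's
 * accumulated scheduler runtime in nanoseconds, including the not yet
 * accounted delta of a currently running task, so a caller can simply do:
 *
 *	u64 ns = task_sched_runtime(p);
 */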

#endif /* _LINUX_SCHED_CPUTIME_H */