/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */

/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
enum {
#include <linux/sched/sd_flags.h>
	__SD_FLAG_CNT,
};
#undef SD_FLAG
/* Generate SD flag bits */
#define SD_FLAG(name, mflags) name = 1 << __##name,
enum {
#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
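
/*
 * Illustration of the two-pass expansion above. Assuming sd_flags.h
 * contains an entry such as SD_FLAG(SD_BALANCE_NEWIDLE, SDF_SHARED_CHILD),
 * the first pass generates an index and the second pass turns that index
 * into a bit value, roughly:
 *
 *	enum { __SD_BALANCE_NEWIDLE, ..., __SD_FLAG_CNT };
 *	enum { SD_BALANCE_NEWIDLE = 1 << __SD_BALANCE_NEWIDLE, ... };
 *
 * so every flag gets a unique bit without any hand-maintained masks.
 */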

struct sd_flag_debug {
	unsigned int meta_flags;
	char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];
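
/*
 * A minimal sketch of how the debug table can be filled from the same
 * X-macro, assuming one SD_FLAG() entry per flag in sd_flags.h (the
 * actual definition lives in the scheduler's debug code):
 *
 *	#define SD_FLAG(_name, mflags) [__##_name] = { mflags, #_name },
 *	const struct sd_flag_debug sd_flag_debug[] = {
 *	#include <linux/sched/sd_flags.h>
 *	};
 *	#undef SD_FLAG
 */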

struct sched_domain_topology_level;

#ifdef CONFIG_SCHED_SMT
extern int cpu_smt_flags(void);
extern const struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu);
#endif

#ifdef CONFIG_SCHED_CLUSTER
extern int cpu_cluster_flags(void);
extern const struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu);
#endif

#ifdef CONFIG_SCHED_MC
extern int cpu_core_flags(void);
extern const struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl, int cpu);
#endif

extern const struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu);

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT (struct sched_domain_attr) { \
	.relax_domain_level = -1, \
}
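
/*
 * Example (illustrative): declaring a domain attribute with the default
 * setting, as produced by SD_ATTR_INIT:
 *
 *	struct sched_domain_attr dattr = SD_ATTR_INIT;
 *
 * A relax_domain_level of -1 indicates that no specific level has been
 * requested.
 */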

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;		/* reference count */
	atomic_t	nr_busy_cpus;	/* number of non-idle CPUs in the domain */
	int		has_idle_cores;	/* hint: an idle core may exist */
	int		nr_idle_scan;	/* how many CPUs the idle search may scan */
};

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain __rcu *parent;	/* top domain must be null terminated */
	struct sched_domain __rcu *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int imb_numa_nr;	/* Nr running tasks that allows a NUMA imbalance */

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	unsigned int newidle_call;
	unsigned int newidle_success;
	unsigned int newidle_ratio;
	u64 max_newidle_lb_cost;
	unsigned long last_decay_max_lb_cost;

#ifdef CONFIG_SCHEDSTATS
	/* sched_balance_rq() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance_load[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance_util[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance_task[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance_misfit[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
	char *name;
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
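
/*
 * Example (illustrative): walking the CPUs covered by a domain. Scheduler
 * code typically does this under rcu_read_lock(), since sched domains are
 * RCU-protected:
 *
 *	int i;
 *
 *	for_each_cpu(i, sched_domain_span(sd))
 *		inspect_cpu(i);		// inspect_cpu() is a hypothetical helper
 */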

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
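
/*
 * A minimal sketch of the expected calling pattern (illustrative only):
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], cpu_active_mask);
 *		partition_sched_domains(1, doms, NULL);
 *	}
 *
 * The array passed in becomes the current partition, so the caller must
 * not free it while it is in use.
 */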

bool cpus_equal_capacity(int this_cpu, int that_cpu);	/* same compute capacity? */
bool cpus_share_cache(int this_cpu, int that_cpu);	/* same last-level cache? */
bool cpus_share_resources(int this_cpu, int that_cpu);	/* same cluster resources? */

typedef const struct cpumask *(*sched_domain_mask_f)(struct sched_domain_topology_level *tl, int cpu);
typedef int (*sched_domain_flags_f)(void);

struct sd_data {
	struct sched_domain *__percpu *sd;
	struct sched_domain_shared *__percpu *sds;
	struct sched_group *__percpu *sg;
	struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int numa_level;
	struct sd_data data;
	char *name;
};

extern void __init set_sched_topology(struct sched_domain_topology_level *tl);
extern void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio);

#define SDTL_INIT(maskfn, flagsfn, dname) ((struct sched_domain_topology_level) \
	{ .mask = maskfn, .sd_flags = flagsfn, .name = #dname })
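
/*
 * A sketch of how an architecture can describe its topology with
 * SDTL_INIT (illustrative; "my_topology" is a hypothetical table, but the
 * generic default table in the scheduler core looks broadly like this):
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		SDTL_INIT(tl_smt_mask, cpu_smt_flags, SMT),
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		SDTL_INIT(tl_mc_mask, cpu_core_flags, MC),
 *	#endif
 *		SDTL_INIT(tl_pkg_mask, NULL, PKG),
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 */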

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern void rebuild_sched_domains_energy(void);
#else
static inline void rebuild_sched_domains_energy(void)
{
}
#endif

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *             max_perf(cpu)
 *      ----------------------------- * SCHED_CAPACITY_SCALE
 *      max(max_perf(c) : c \in CPUs)
 */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
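
/*
 * Architectures with asymmetric CPUs override the default above by
 * defining the macro before this header is seen, e.g. (illustrative,
 * modelled on the arch_topology pattern):
 *
 *	#define arch_scale_cpu_capacity topology_get_cpu_scale
 *
 * so that a CPU whose peak performance is half that of the fastest CPU
 * reports a capacity of about SCHED_CAPACITY_SCALE / 2, i.e. 512.
 */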

#ifndef arch_scale_hw_pressure
static __always_inline
unsigned long arch_scale_hw_pressure(int cpu)
{
	return 0;
}
#endif

#ifndef arch_update_hw_pressure
static __always_inline
void arch_update_hw_pressure(const struct cpumask *cpus,
			     unsigned long capped_frequency)
{ }
#endif

#ifndef arch_scale_freq_ref
static __always_inline
unsigned int arch_scale_freq_ref(int cpu)
{
	return 0;
}
#endif
static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */