1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Infrastructure for migratable timers
4 *
5 * Copyright(C) 2022 linutronix GmbH
6 */
7#include <linux/cpuhotplug.h>
8#include <linux/slab.h>
9#include <linux/smp.h>
10#include <linux/spinlock.h>
11#include <linux/timerqueue.h>
12#include <trace/events/ipi.h>
13#include <linux/sched/isolation.h>
14
15#include "timer_migration.h"
16#include "tick-internal.h"
17
18#define CREATE_TRACE_POINTS
19#include <trace/events/timer_migration.h>
20
21/*
22 * The timer migration mechanism is built on a hierarchy of groups. The
23 * lowest level group contains CPUs, the next level groups of CPU groups
24 * and so forth. The CPU groups are kept per node so for the normal case
25 * lock contention won't happen across nodes. Depending on the number of
26 * CPUs per node even the next level might be kept as groups of CPU groups
27 * per node and only the levels above cross the node topology.
28 *
29 * Example topology for a two node system with 24 CPUs each.
30 *
31 * LVL 2 [GRP2:0]
32 * GRP1:0 = GRP1:M
33 *
34 * LVL 1 [GRP1:0] [GRP1:1]
35 * GRP0:0 - GRP0:2 GRP0:3 - GRP0:5
36 *
37 * LVL 0 [GRP0:0] [GRP0:1] [GRP0:2] [GRP0:3] [GRP0:4] [GRP0:5]
38 * CPUS 0-7 8-15 16-23 24-31 32-39 40-47
39 *
 * The groups hold a timer queue of events sorted by expiry time. These
 * queues are updated when CPUs go idle. When they come out of idle, the
 * ignore flag of their queued events is set.
43 *
 * Each group has a designated migrator CPU/group as long as a CPU/group is
 * active in the group. This designated role is necessary to prevent all
 * active CPUs in a group from trying to migrate expired timers from other
 * CPUs, which would result in massive lock bouncing.
48 *
 * When a CPU is awake, it checks in its own timer tick the group
 * hierarchy up to the point where it is assigned the migrator role, or, if
 * no CPU is active, it also checks the groups where no migrator is set
 * (TMIGR_NONE).
53 *
54 * If it finds expired timers in one of the group queues it pulls them over
55 * from the idle CPU and runs the timer function. After that it updates the
56 * group and the parent groups if required.
57 *
58 * CPUs which go idle arm their CPU local timer hardware for the next local
59 * (pinned) timer event. If the next migratable timer expires after the
60 * next local timer or the CPU has no migratable timer pending then the
61 * CPU does not queue an event in the LVL0 group. If the next migratable
62 * timer expires before the next local timer then the CPU queues that timer
63 * in the LVL0 group. In both cases the CPU marks itself idle in the LVL0
64 * group.
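 *
 * A rough sketch of that idle path decision (simplified pseudo-code, not the
 * actual implementation; see tmigr_cpu_deactivate() and tmigr_new_timer()
 * below for the real code):
 *
 *	// next_local/next_global as handed in by the timer code
 *	if (next_global < next_local)
 *		queue next_global as this CPU's event in the LVL0 group
 *	// in both cases:
 *	mark the CPU idle in the LVL0 group and propagate the change if needed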
65 *
 * When a CPU comes out of idle and when a group has at least a single active
 * child, the ignore flag of the tmigr_event is set. This indicates that the
 * event is ignored even if it is still enqueued in the parent group's
 * timer queue. It will be removed when touching the timer queue the next
 * time. This spares locking in the active path as the lock protects (after
 * setup) only event information. For more information about locking,
 * please read the section "Locking rules".
73 *
74 * If the CPU is the migrator of the group then it delegates that role to
75 * the next active CPU in the group or sets migrator to TMIGR_NONE when
76 * there is no active CPU in the group. This delegation needs to be
77 * propagated up the hierarchy so hand over from other leaves can happen at
78 * all hierarchy levels w/o doing a search.
79 *
 * When the last CPU in the system goes idle, then it drops all migrator
 * duties up to the top level of the hierarchy (LVL2 in the example). It
 * then has to make sure that it arms its own local hardware timer for
 * the earliest event in the system.
84 *
85 *
86 * Lifetime rules:
87 * ---------------
88 *
89 * The groups are built up at init time or when CPUs come online. They are
90 * not destroyed when a group becomes empty due to offlining. The group
91 * just won't participate in the hierarchy management anymore. Destroying
92 * groups would result in interesting race conditions which would just make
93 * the whole mechanism slow and complex.
94 *
95 *
96 * Locking rules:
97 * --------------
98 *
99 * For setting up new groups and handling events it's required to lock both
100 * child and parent group. The lock ordering is always bottom up. This also
101 * includes the per CPU locks in struct tmigr_cpu. For updating the migrator and
102 * active CPU/group information atomic_try_cmpxchg() is used instead and only
103 * the per CPU tmigr_cpu->lock is held.
104 *
105 * During the setup of groups tmigr_level_list is required. It is protected by
106 * @tmigr_mutex.
107 *
108 * When @timer_base->lock as well as tmigr related locks are required, the lock
109 * ordering is: first @timer_base->lock, afterwards tmigr related locks.
110 *
111 *
112 * Protection of the tmigr group state information:
113 * ------------------------------------------------
114 *
115 * The state information with the list of active children and migrator needs to
116 * be protected by a sequence counter. It prevents a race when updates in child
117 * groups are propagated in changed order. The state update is performed
118 * lockless and group wise. The following scenario describes what happens
119 * without updating the sequence counter:
120 *
 * Let's take three groups and four CPUs (CPU2 and CPU3 as well
122 * as GRP0:1 will not change during the scenario):
123 *
124 * LVL 1 [GRP1:0]
125 * migrator = GRP0:1
126 * active = GRP0:0, GRP0:1
127 * / \
128 * LVL 0 [GRP0:0] [GRP0:1]
129 * migrator = CPU0 migrator = CPU2
130 * active = CPU0 active = CPU2
131 * / \ / \
132 * CPUs 0 1 2 3
133 * active idle active idle
134 *
135 *
136 * 1. CPU0 goes idle. As the update is performed group wise, in the first step
137 * only GRP0:0 is updated. The update of GRP1:0 is pending as CPU0 has to
138 * walk the hierarchy.
139 *
140 * LVL 1 [GRP1:0]
141 * migrator = GRP0:1
142 * active = GRP0:0, GRP0:1
143 * / \
144 * LVL 0 [GRP0:0] [GRP0:1]
145 * --> migrator = TMIGR_NONE migrator = CPU2
146 * --> active = active = CPU2
147 * / \ / \
148 * CPUs 0 1 2 3
149 * --> idle idle active idle
150 *
151 * 2. While CPU0 goes idle and continues to update the state, CPU1 comes out of
152 * idle. CPU1 updates GRP0:0. The update for GRP1:0 is pending as CPU1 also
153 * has to walk the hierarchy. Both CPUs (CPU0 and CPU1) now walk the
154 * hierarchy to perform the needed update from their point of view. The
155 * currently visible state looks the following:
156 *
157 * LVL 1 [GRP1:0]
158 * migrator = GRP0:1
159 * active = GRP0:0, GRP0:1
160 * / \
161 * LVL 0 [GRP0:0] [GRP0:1]
162 * --> migrator = CPU1 migrator = CPU2
163 * --> active = CPU1 active = CPU2
164 * / \ / \
165 * CPUs 0 1 2 3
166 * idle --> active active idle
167 *
168 * 3. Here is the race condition: CPU1 managed to propagate its changes (from
169 * step 2) through the hierarchy to GRP1:0 before CPU0 (step 1) did. The
170 * active members of GRP1:0 remain unchanged after the update since it is
 *    still valid from CPU1's current point of view:
172 *
173 * LVL 1 [GRP1:0]
174 * --> migrator = GRP0:1
175 * --> active = GRP0:0, GRP0:1
176 * / \
177 * LVL 0 [GRP0:0] [GRP0:1]
178 * migrator = CPU1 migrator = CPU2
179 * active = CPU1 active = CPU2
180 * / \ / \
181 * CPUs 0 1 2 3
182 * idle active active idle
183 *
184 * 4. Now CPU0 finally propagates its changes (from step 1) to GRP1:0.
185 *
186 * LVL 1 [GRP1:0]
187 * --> migrator = GRP0:1
188 * --> active = GRP0:1
189 * / \
190 * LVL 0 [GRP0:0] [GRP0:1]
191 * migrator = CPU1 migrator = CPU2
192 * active = CPU1 active = CPU2
193 * / \ / \
194 * CPUs 0 1 2 3
195 * idle active active idle
196 *
197 *
198 * The race of CPU0 vs. CPU1 led to an inconsistent state in GRP1:0. CPU1 is
199 * active and is correctly listed as active in GRP0:0. However GRP1:0 does not
200 * have GRP0:0 listed as active, which is wrong. The sequence counter has been
201 * added to avoid inconsistent states during updates. The state is updated
202 * atomically only if all members, including the sequence counter, match the
203 * expected value (compare-and-exchange).
204 *
205 * Looking back at the previous example with the addition of the sequence
206 * counter: The update as performed by CPU0 in step 4 will fail. CPU1 changed
207 * the sequence number during the update in step 3 so the expected old value (as
208 * seen by CPU0 before starting the walk) does not match.
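 *
 * A minimal sketch of such a sequence counter protected update (simplified;
 * the real code lives in tmigr_active_up() and tmigr_inactive_up() below,
 * operating on the packed state word of union tmigr_state):
 *
 *	curstate.state = atomic_read(&group->migr_state);
 *	do {
 *		newstate = curstate;
 *		// update newstate.migrator / newstate.active as required
 *		newstate.seq++;
 *	} while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state,
 *				     newstate.state));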
209 *
210 * Prevent race between new event and last CPU going inactive
211 * ----------------------------------------------------------
212 *
213 * When the last CPU is going idle and there is a concurrent update of a new
214 * first global timer of an idle CPU, the group and child states have to be read
215 * while holding the lock in tmigr_update_events(). The following scenario shows
216 * what happens, when this is not done.
217 *
218 * 1. Only CPU2 is active:
219 *
220 * LVL 1 [GRP1:0]
221 * migrator = GRP0:1
222 * active = GRP0:1
223 * next_expiry = KTIME_MAX
224 * / \
225 * LVL 0 [GRP0:0] [GRP0:1]
226 * migrator = TMIGR_NONE migrator = CPU2
227 * active = active = CPU2
228 * next_expiry = KTIME_MAX next_expiry = KTIME_MAX
229 * / \ / \
230 * CPUs 0 1 2 3
231 * idle idle active idle
232 *
 * 2. Now CPU2 goes idle (and has no global timer that has to be handled) and
234 * propagates that to GRP0:1:
235 *
236 * LVL 1 [GRP1:0]
237 * migrator = GRP0:1
238 * active = GRP0:1
239 * next_expiry = KTIME_MAX
240 * / \
241 * LVL 0 [GRP0:0] [GRP0:1]
242 * migrator = TMIGR_NONE --> migrator = TMIGR_NONE
243 * active = --> active =
244 * next_expiry = KTIME_MAX next_expiry = KTIME_MAX
245 * / \ / \
246 * CPUs 0 1 2 3
247 * idle idle --> idle idle
248 *
249 * 3. Now the idle state is propagated up to GRP1:0. As this is now the last
 *    child going idle in the top level group, the expiry of the next group event
251 * has to be handed back to make sure no event is lost. As there is no event
252 * enqueued, KTIME_MAX is handed back to CPU2.
253 *
254 * LVL 1 [GRP1:0]
255 * --> migrator = TMIGR_NONE
256 * --> active =
257 * next_expiry = KTIME_MAX
258 * / \
259 * LVL 0 [GRP0:0] [GRP0:1]
260 * migrator = TMIGR_NONE migrator = TMIGR_NONE
261 * active = active =
262 * next_expiry = KTIME_MAX next_expiry = KTIME_MAX
263 * / \ / \
264 * CPUs 0 1 2 3
265 * idle idle --> idle idle
266 *
 * 4. CPU0 has a new timer queued from idle and it expires at TIMER0. CPU0
268 * propagates that to GRP0:0:
269 *
270 * LVL 1 [GRP1:0]
271 * migrator = TMIGR_NONE
272 * active =
273 * next_expiry = KTIME_MAX
274 * / \
275 * LVL 0 [GRP0:0] [GRP0:1]
276 * migrator = TMIGR_NONE migrator = TMIGR_NONE
277 * active = active =
278 * --> next_expiry = TIMER0 next_expiry = KTIME_MAX
279 * / \ / \
280 * CPUs 0 1 2 3
281 * idle idle idle idle
282 *
283 * 5. GRP0:0 is not active, so the new timer has to be propagated to
 *    GRP1:0. Therefore the GRP1:0 state has to be read. When the stale value
 *    (from step 2) is read, the timer is enqueued into GRP1:0, but nothing is
 *    handed back to CPU0, as it seems that there is still an active child in
 *    the top level group.
288 *
289 * LVL 1 [GRP1:0]
290 * migrator = TMIGR_NONE
291 * active =
292 * --> next_expiry = TIMER0
293 * / \
294 * LVL 0 [GRP0:0] [GRP0:1]
295 * migrator = TMIGR_NONE migrator = TMIGR_NONE
296 * active = active =
297 * next_expiry = TIMER0 next_expiry = KTIME_MAX
298 * / \ / \
299 * CPUs 0 1 2 3
300 * idle idle idle idle
301 *
302 * This is prevented by reading the state when holding the lock (when a new
303 * timer has to be propagated from idle path)::
304 *
305 * CPU2 (tmigr_inactive_up()) CPU0 (tmigr_new_timer_up())
306 * -------------------------- ---------------------------
307 * // step 3:
308 * cmpxchg(&GRP1:0->state);
309 * tmigr_update_events() {
310 * spin_lock(&GRP1:0->lock);
311 * // ... update events ...
312 * // hand back first expiry when GRP1:0 is idle
313 * spin_unlock(&GRP1:0->lock);
314 * // ^^^ release state modification
315 * }
316 * tmigr_update_events() {
317 * spin_lock(&GRP1:0->lock)
318 * // ^^^ acquire state modification
319 * group_state = atomic_read(&GRP1:0->state)
320 * // .... update events ...
321 * // hand back first expiry when GRP1:0 is idle
322 * spin_unlock(&GRP1:0->lock) <3>
323 * // ^^^ makes state visible for other
324 * // callers of tmigr_new_timer_up()
325 * }
326 *
327 * When CPU0 grabs the lock directly after cmpxchg, the first timer is reported
328 * back to CPU0 and also later on to CPU2. So no timer is missed. A concurrent
329 * update of the group state from active path is no problem, as the upcoming CPU
330 * will take care of the group events.
331 *
332 * Required event and timerqueue update after a remote expiry:
333 * -----------------------------------------------------------
334 *
335 * After expiring timers of a remote CPU, a walk through the hierarchy and
336 * update of events and timerqueues is required. It is obviously needed if there
337 * is a 'new' global timer but also if there is no new global timer but the
338 * remote CPU is still idle.
339 *
340 * 1. CPU0 and CPU1 are idle and have both a global timer expiring at the same
341 * time. So both have an event enqueued in the timerqueue of GRP0:0. CPU3 is
342 * also idle and has no global timer pending. CPU2 is the only active CPU and
343 * thus also the migrator:
344 *
345 * LVL 1 [GRP1:0]
346 * migrator = GRP0:1
347 * active = GRP0:1
348 * --> timerqueue = evt-GRP0:0
349 * / \
350 * LVL 0 [GRP0:0] [GRP0:1]
351 * migrator = TMIGR_NONE migrator = CPU2
352 * active = active = CPU2
353 * groupevt.ignore = false groupevt.ignore = true
354 * groupevt.cpu = CPU0 groupevt.cpu =
355 * timerqueue = evt-CPU0, timerqueue =
356 * evt-CPU1
357 * / \ / \
358 * CPUs 0 1 2 3
359 * idle idle active idle
360 *
361 * 2. CPU2 starts to expire remote timers. It starts with LVL0 group
362 * GRP0:1. There is no event queued in the timerqueue, so CPU2 continues with
363 * the parent of GRP0:1: GRP1:0. In GRP1:0 it dequeues the first event. It
364 * looks at tmigr_event::cpu struct member and expires the pending timer(s)
365 * of CPU0.
366 *
367 * LVL 1 [GRP1:0]
368 * migrator = GRP0:1
369 * active = GRP0:1
370 * --> timerqueue =
371 * / \
372 * LVL 0 [GRP0:0] [GRP0:1]
373 * migrator = TMIGR_NONE migrator = CPU2
374 * active = active = CPU2
375 * groupevt.ignore = false groupevt.ignore = true
376 * --> groupevt.cpu = CPU0 groupevt.cpu =
377 * timerqueue = evt-CPU0, timerqueue =
378 * evt-CPU1
379 * / \ / \
380 * CPUs 0 1 2 3
381 * idle idle active idle
382 *
383 * 3. Some work has to be done after expiring the timers of CPU0. If we stop
384 * here, then CPU1's pending global timer(s) will not expire in time and the
 *    timerqueue of GRP0:0 still has an event for CPU0 enqueued which has just
386 * been processed. So it is required to walk the hierarchy from CPU0's point
387 * of view and update it accordingly. CPU0's event will be removed from the
 *    timerqueue because it has no pending timer. If CPU0 had a timer
 *    pending, it would have to expire after CPU1's first timer because all
 *    timers from this period were just expired. Either way CPU1's event will be
 *    first in GRP0:0's timerqueue and therefore set in the CPU field of the group
392 * event which is then enqueued in GRP1:0's timerqueue as GRP0:0 is still not
393 * active:
394 *
395 * LVL 1 [GRP1:0]
396 * migrator = GRP0:1
397 * active = GRP0:1
398 * --> timerqueue = evt-GRP0:0
399 * / \
400 * LVL 0 [GRP0:0] [GRP0:1]
401 * migrator = TMIGR_NONE migrator = CPU2
402 * active = active = CPU2
403 * groupevt.ignore = false groupevt.ignore = true
404 * --> groupevt.cpu = CPU1 groupevt.cpu =
405 * --> timerqueue = evt-CPU1 timerqueue =
406 * / \ / \
407 * CPUs 0 1 2 3
408 * idle idle active idle
409 *
410 * Now CPU2 (migrator) will continue step 2 at GRP1:0 and will expire the
411 * timer(s) of CPU1.
412 *
413 * The hierarchy walk in step 3 can be skipped if the migrator notices that a
414 * CPU of GRP0:0 is active again. The CPU will mark GRP0:0 active and take care
415 * of the group as migrator and any needed updates within the hierarchy.
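 *
 * A condensed sketch of the per group handling done by the migrator
 * (simplified; the real implementation is tmigr_handle_remote_up() with its
 * locking and bookkeeping below):
 *
 *	if (!tmigr_check_migrator(group, childmask))
 *		return;		// an active child handles this group itself
 *	while ((evt = tmigr_next_expired_groupevt(group, now)))
 *		tmigr_handle_remote_cpu(evt->cpu, now, jif);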
416 */
417
418static DEFINE_MUTEX(tmigr_mutex);
419static struct list_head *tmigr_level_list __read_mostly;
420
421static unsigned int tmigr_hierarchy_levels __read_mostly;
422static unsigned int tmigr_crossnode_level __read_mostly;
423
424static struct tmigr_group *tmigr_root;
425
426static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
427
428/*
429 * CPUs available for timer migration.
430 * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
431 * Additionally tmigr_available_mutex serializes set/clear operations with each other.
432 */
433static cpumask_var_t tmigr_available_cpumask;
434static DEFINE_MUTEX(tmigr_available_mutex);
435
436/* Enabled during late initcall */
437static DEFINE_STATIC_KEY_FALSE(tmigr_exclude_isolated);
438
439#define TMIGR_NONE 0xFF
440#define BIT_CNT 8
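
/*
 * Note: The group state (see union tmigr_state in timer_migration.h) packs the
 * migrator and active information into byte sized fields, hence TMIGR_NONE is
 * 0xFF and BIT_CNT is 8. As an illustrative example: in a LVL0 group whose
 * first two children are CPU0 (groupmask 0x01) and CPU1 (groupmask 0x02), the
 * active field is 0x03 when both CPUs are active, and the migrator field is
 * 0x01 while CPU0 holds the migrator role.
 */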
441
442static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
443{
444 return !(tmc->tmgroup && tmc->available);
445}
446
447/*
448 * Returns true if @cpu should be excluded from the hierarchy as isolated.
449 * Domain isolated CPUs don't participate in timer migration, nohz_full CPUs
450 * are still part of the hierarchy but become idle (from a tick and timer
451 * migration perspective) when they stop their tick. This lets the timekeeping
 * CPU handle their global timers. Also marking isolated CPUs as idle would be
 * too costly, hence they are completely excluded from the hierarchy.
 * This check is necessary, for instance, to prevent offline isolated CPUs from
 * being incorrectly marked as available when they come back online.
456 *
457 * This function returns false during early boot and the isolation logic is
458 * enabled only after isolated CPUs are marked as unavailable at late boot.
459 * The tick CPU can be isolated at boot, however we cannot mark it as
460 * unavailable to avoid having no global migrator for the nohz_full CPUs. This
461 * should be ensured by the callers of this function: implicitly from hotplug
462 * callbacks and explicitly in tmigr_init_isolation() and
463 * tmigr_isolated_exclude_cpumask().
464 */
465static inline bool tmigr_is_isolated(int cpu)
466{
467 if (!static_branch_unlikely(&tmigr_exclude_isolated))
468 return false;
	return (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN) ||
		cpuset_cpu_is_isolated(cpu)) &&
	       housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE);
472}
473
474/*
475 * Returns true, when @childmask corresponds to the group migrator or when the
476 * group is not active - so no migrator is set.
477 */
478static bool tmigr_check_migrator(struct tmigr_group *group, u8 childmask)
479{
480 union tmigr_state s;
481
	s.state = atomic_read(&group->migr_state);
483
484 if ((s.migrator == childmask) || (s.migrator == TMIGR_NONE))
485 return true;
486
487 return false;
488}
489
490static bool tmigr_check_migrator_and_lonely(struct tmigr_group *group, u8 childmask)
491{
492 bool lonely, migrator = false;
493 unsigned long active;
494 union tmigr_state s;
495
	s.state = atomic_read(&group->migr_state);

	if ((s.migrator == childmask) || (s.migrator == TMIGR_NONE))
		migrator = true;

	active = s.active;
	lonely = bitmap_weight(&active, BIT_CNT) <= 1;
503
504 return (migrator && lonely);
505}
506
507static bool tmigr_check_lonely(struct tmigr_group *group)
508{
509 unsigned long active;
510 union tmigr_state s;
511
	s.state = atomic_read(&group->migr_state);

	active = s.active;

	return bitmap_weight(&active, BIT_CNT) <= 1;
517}
518
519/**
520 * struct tmigr_walk - data required for walking the hierarchy
521 * @nextexp: Next CPU event expiry information which is handed into
522 * the timer migration code by the timer code
523 * (get_next_timer_interrupt())
524 * @firstexp: Contains the first event expiry information when
525 * hierarchy is completely idle. When CPU itself was the
526 * last going idle, information makes sure, that CPU will
527 * be back in time. When using this value in the remote
528 * expiry case, firstexp is stored in the per CPU tmigr_cpu
529 * struct of CPU which expires remote timers. It is updated
530 * in top level group only. Be aware, there could occur a
531 * new top level of the hierarchy between the 'top level
532 * call' in tmigr_update_events() and the check for the
533 * parent group in walk_groups(). Then @firstexp might
534 * contain a value != KTIME_MAX even if it was not the
535 * final top level. This is not a problem, as the worst
536 * outcome is a CPU which might wake up a little early.
537 * @evt: Pointer to tmigr_event which needs to be queued (of idle
538 * child group)
539 * @childmask: groupmask of child group
540 * @remote: Is set, when the new timer path is executed in
541 * tmigr_handle_remote_cpu()
542 * @basej: timer base in jiffies
543 * @now: timer base monotonic
544 * @check: is set if there is the need to handle remote timers;
545 * required in tmigr_requires_handle_remote() only
546 */
547struct tmigr_walk {
548 u64 nextexp;
549 u64 firstexp;
550 struct tmigr_event *evt;
551 u8 childmask;
552 bool remote;
553 unsigned long basej;
554 u64 now;
555 bool check;
556};
557
558typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *);
559
560static void __walk_groups_from(up_f up, struct tmigr_walk *data,
561 struct tmigr_group *child, struct tmigr_group *group)
562{
563 do {
564 WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels);
565
566 if (up(group, child, data))
567 break;
568
569 child = group;
570 /*
571 * Pairs with the store release on group connection
572 * to make sure group initialization is visible.
573 */
574 group = READ_ONCE(group->parent);
575 data->childmask = child->groupmask;
576 WARN_ON_ONCE(!data->childmask);
577 } while (group);
578}
579
580static void __walk_groups(up_f up, struct tmigr_walk *data,
581 struct tmigr_cpu *tmc)
582{
	__walk_groups_from(up, data, NULL, tmc->tmgroup);
584}
585
586static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc)
587{
588 lockdep_assert_held(&tmc->lock);
589
590 __walk_groups(up, data, tmc);
591}
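
/*
 * Typical usage, as a sketch mirroring tmigr_new_timer() further below: the
 * caller fills a struct tmigr_walk on the stack and hands in the per level
 * handler together with the per CPU entry point:
 *
 *	struct tmigr_walk data = { .nextexp  = nextexp,
 *				   .firstexp = KTIME_MAX,
 *				   .evt      = &tmc->cpuevt };
 *
 *	walk_groups(&tmigr_new_timer_up, &data, tmc);
 */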
592
593/*
594 * Returns the next event of the timerqueue @group->events
595 *
 * Removes timers with ignore flag and updates next_expiry of the group. Values
597 * of the group event are updated in tmigr_update_events() only.
598 */
599static struct tmigr_event *tmigr_next_groupevt(struct tmigr_group *group)
600{
601 struct timerqueue_node *node = NULL;
602 struct tmigr_event *evt = NULL;
603
604 lockdep_assert_held(&group->lock);
605
606 WRITE_ONCE(group->next_expiry, KTIME_MAX);
607
	while ((node = timerqueue_getnext(&group->events))) {
609 evt = container_of(node, struct tmigr_event, nextevt);
610
611 if (!READ_ONCE(evt->ignore)) {
612 WRITE_ONCE(group->next_expiry, evt->nextevt.expires);
613 return evt;
614 }
615
616 /*
617 * Remove next timers with ignore flag, because the group lock
618 * is held anyway
619 */
		if (!timerqueue_del(&group->events, node))
621 break;
622 }
623
624 return NULL;
625}
626
627/*
 * Return the next event (with an expiry equal to or before @now)
 *
 * The returned event is also removed from the queue.
631 */
632static struct tmigr_event *tmigr_next_expired_groupevt(struct tmigr_group *group,
633 u64 now)
634{
635 struct tmigr_event *evt = tmigr_next_groupevt(group);
636
637 if (!evt || now < evt->nextevt.expires)
638 return NULL;
639
640 /*
641 * The event is ready to expire. Remove it and update next group event.
642 */
	timerqueue_del(&group->events, &evt->nextevt);
644 tmigr_next_groupevt(group);
645
646 return evt;
647}
648
649static u64 tmigr_next_groupevt_expires(struct tmigr_group *group)
650{
651 struct tmigr_event *evt;
652
653 evt = tmigr_next_groupevt(group);
654
655 if (!evt)
656 return KTIME_MAX;
657 else
658 return evt->nextevt.expires;
659}
660
661static bool tmigr_active_up(struct tmigr_group *group,
662 struct tmigr_group *child,
663 struct tmigr_walk *data)
664{
665 union tmigr_state curstate, newstate;
666 bool walk_done;
667 u8 childmask;
668
669 childmask = data->childmask;
670 /*
671 * No memory barrier is required here in contrast to
672 * tmigr_inactive_up(), as the group state change does not depend on the
673 * child state.
674 */
	curstate.state = atomic_read(&group->migr_state);
676
677 do {
678 newstate = curstate;
679 walk_done = true;
680
681 if (newstate.migrator == TMIGR_NONE) {
682 newstate.migrator = childmask;
683
684 /* Changes need to be propagated */
685 walk_done = false;
686 }
687
688 newstate.active |= childmask;
689 newstate.seq++;
690
	} while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state));

	trace_tmigr_group_set_cpu_active(group, newstate, childmask);
694
695 /*
696 * The group is active (again). The group event might be still queued
697 * into the parent group's timerqueue but can now be handled by the
698 * migrator of this group. Therefore the ignore flag for the group event
699 * is updated to reflect this.
700 *
701 * The update of the ignore flag in the active path is done lockless. In
702 * worst case the migrator of the parent group observes the change too
703 * late and expires remotely all events belonging to this group. The
704 * lock is held while updating the ignore flag in idle path. So this
705 * state change will not be lost.
706 */
707 WRITE_ONCE(group->groupevt.ignore, true);
708
709 return walk_done;
710}
711
712static void __tmigr_cpu_activate(struct tmigr_cpu *tmc)
713{
714 struct tmigr_walk data;
715
716 data.childmask = tmc->groupmask;
717
718 trace_tmigr_cpu_active(tmc);
719
720 tmc->cpuevt.ignore = true;
721 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
722
	walk_groups(&tmigr_active_up, &data, tmc);
724}
725
726/**
727 * tmigr_cpu_activate() - set this CPU active in timer migration hierarchy
728 *
729 * Call site timer_clear_idle() is called with interrupts disabled.
730 */
731void tmigr_cpu_activate(void)
732{
733 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
734
735 if (tmigr_is_not_available(tmc))
736 return;
737
738 if (WARN_ON_ONCE(!tmc->idle))
739 return;
740
741 raw_spin_lock(&tmc->lock);
742 tmc->idle = false;
743 __tmigr_cpu_activate(tmc);
744 raw_spin_unlock(&tmc->lock);
745}
746
747/*
748 * Returns true, if there is nothing to be propagated to the next level
749 *
750 * @data->firstexp is set to expiry of first global event of the (top level of
751 * the) hierarchy, but only when hierarchy is completely idle.
752 *
753 * The child and group states need to be read under the lock, to prevent a race
754 * against a concurrent tmigr_inactive_up() run when the last CPU goes idle. See
755 * also section "Prevent race between new event and last CPU going inactive" in
756 * the documentation at the top.
757 *
758 * This is the only place where the group event expiry value is set.
759 */
760static
761bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child,
762 struct tmigr_walk *data)
763{
764 struct tmigr_event *evt, *first_childevt;
765 union tmigr_state childstate, groupstate;
766 bool remote = data->remote;
767 bool walk_done = false;
768 bool ignore;
769 u64 nextexp;
770
771 if (child) {
772 raw_spin_lock(&child->lock);
773 raw_spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
774
		childstate.state = atomic_read(&child->migr_state);
		groupstate.state = atomic_read(&group->migr_state);
777
778 if (childstate.active) {
779 walk_done = true;
780 goto unlock;
781 }
782
		first_childevt = tmigr_next_groupevt(child);
784 nextexp = child->next_expiry;
785 evt = &child->groupevt;
786
787 /*
788 * This can race with concurrent idle exit (activate).
789 * If the current writer wins, a useless remote expiration may
790 * be scheduled. If the activate wins, the event is properly
791 * ignored.
792 */
793 ignore = (nextexp == KTIME_MAX) ? true : false;
794 WRITE_ONCE(evt->ignore, ignore);
795 } else {
796 nextexp = data->nextexp;
797
798 first_childevt = evt = data->evt;
799 ignore = evt->ignore;
800
801 /*
802 * Walking the hierarchy is required in any case when a
803 * remote expiry was done before. This ensures to not lose
804 * already queued events in non active groups (see section
805 * "Required event and timerqueue update after a remote
806 * expiry" in the documentation at the top).
807 *
808 * The two call sites which are executed without a remote expiry
809 * before, are not prevented from propagating changes through
810 * the hierarchy by the return:
811 * - When entering this path by tmigr_new_timer(), @evt->ignore
812 * is never set.
813 * - tmigr_inactive_up() takes care of the propagation by
814 * itself and ignores the return value. But an immediate
815 * return is possible if there is a parent, sparing group
816 * locking at this level, because the upper walking call to
817 * the parent will take care about removing this event from
818 * within the group and update next_expiry accordingly.
819 *
820 * However if there is no parent, ie: the hierarchy has only a
821 * single level so @group is the top level group, make sure the
822 * first event information of the group is updated properly and
823 * also handled properly, so skip this fast return path.
824 */
825 if (ignore && !remote && group->parent)
826 return true;
827
828 raw_spin_lock(&group->lock);
829
830 childstate.state = 0;
		groupstate.state = atomic_read(&group->migr_state);
832 }
833
834 /*
	 * If the child event is already queued in the group, remove it from the
	 * queue only when the expiry time has changed or when it could be ignored.
837 */
	if (timerqueue_node_queued(&evt->nextevt)) {
839 if ((evt->nextevt.expires == nextexp) && !ignore) {
840 /* Make sure not to miss a new CPU event with the same expiry */
841 evt->cpu = first_childevt->cpu;
842 goto check_toplvl;
843 }
844
		if (!timerqueue_del(&group->events, &evt->nextevt))
846 WRITE_ONCE(group->next_expiry, KTIME_MAX);
847 }
848
849 if (ignore) {
850 /*
851 * When the next child event could be ignored (nextexp is
852 * KTIME_MAX) and there was no remote timer handling before or
853 * the group is already active, there is no need to walk the
854 * hierarchy even if there is a parent group.
855 *
856 * The other way round: even if the event could be ignored, but
857 * if a remote timer handling was executed before and the group
858 * is not active, walking the hierarchy is required to not miss
859 * an enqueued timer in the non active group. The enqueued timer
860 * of the group needs to be propagated to a higher level to
861 * ensure it is handled.
862 */
863 if (!remote || groupstate.active)
864 walk_done = true;
865 } else {
866 evt->nextevt.expires = nextexp;
867 evt->cpu = first_childevt->cpu;
868
		if (timerqueue_add(&group->events, &evt->nextevt))
870 WRITE_ONCE(group->next_expiry, nextexp);
871 }
872
873check_toplvl:
874 if (!group->parent && (groupstate.migrator == TMIGR_NONE)) {
875 walk_done = true;
876
877 /*
878 * Nothing to do when update was done during remote timer
879 * handling. First timer in top level group which needs to be
880 * handled when top level group is not active, is calculated
881 * directly in tmigr_handle_remote_up().
882 */
883 if (remote)
884 goto unlock;
885
886 /*
887 * The top level group is idle and it has to be ensured the
888 * global timers are handled in time. (This could be optimized
889 * by keeping track of the last global scheduled event and only
890 * arming it on the CPU if the new event is earlier. Not sure if
891 * its worth the complexity.)
892 */
893 data->firstexp = tmigr_next_groupevt_expires(group);
894 }
895
	trace_tmigr_update_events(child, group, childstate, groupstate,
				  nextexp);
898
899unlock:
900 raw_spin_unlock(&group->lock);
901
902 if (child)
903 raw_spin_unlock(&child->lock);
904
905 return walk_done;
906}
907
908static bool tmigr_new_timer_up(struct tmigr_group *group,
909 struct tmigr_group *child,
910 struct tmigr_walk *data)
911{
912 return tmigr_update_events(group, child, data);
913}
914
915/*
916 * Returns the expiry of the next timer that needs to be handled. KTIME_MAX is
917 * returned, if an active CPU will handle all the timer migration hierarchy
918 * timers.
919 */
920static u64 tmigr_new_timer(struct tmigr_cpu *tmc, u64 nextexp)
921{
922 struct tmigr_walk data = { .nextexp = nextexp,
923 .firstexp = KTIME_MAX,
924 .evt = &tmc->cpuevt };
925
926 lockdep_assert_held(&tmc->lock);
927
928 if (tmc->remote)
929 return KTIME_MAX;
930
931 trace_tmigr_cpu_new_timer(tmc);
932
933 tmc->cpuevt.ignore = false;
934 data.remote = false;
935
	walk_groups(&tmigr_new_timer_up, &data, tmc);
937
938 /* If there is a new first global event, make sure it is handled */
939 return data.firstexp;
940}
941
942static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
943 unsigned long jif)
944{
945 struct timer_events tevt;
946 struct tmigr_walk data;
947 struct tmigr_cpu *tmc;
948
949 tmc = per_cpu_ptr(&tmigr_cpu, cpu);
950
951 raw_spin_lock_irq(&tmc->lock);
952
953 /*
954 * If the remote CPU is offline then the timers have been migrated to
955 * another CPU.
956 *
	 * If tmigr_cpu::remote is set, another CPU is already expiring the
	 * timers of the remote CPU at the moment.
959 *
960 * If tmigr_event::ignore is set, then the CPU returns from idle and
961 * takes care of its timers.
962 *
963 * If the next event expires in the future, then the event has been
964 * updated and there are no timers to expire right now. The CPU which
	 * updated the event takes care of it when the hierarchy is completely
	 * idle. Otherwise the migrator does it as the event is enqueued.
967 */
968 if (!tmc->available || tmc->remote || tmc->cpuevt.ignore ||
969 now < tmc->cpuevt.nextevt.expires) {
970 raw_spin_unlock_irq(&tmc->lock);
971 return;
972 }
973
974 trace_tmigr_handle_remote_cpu(tmc);
975
976 tmc->remote = true;
977 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
978
979 /* Drop the lock to allow the remote CPU to exit idle */
980 raw_spin_unlock_irq(&tmc->lock);
981
982 if (cpu != smp_processor_id())
983 timer_expire_remote(cpu);
984
985 /*
986 * Lock ordering needs to be preserved - timer_base locks before tmigr
987 * related locks (see section "Locking rules" in the documentation at
988 * the top). During fetching the next timer interrupt, also tmc->lock
989 * needs to be held. Otherwise there is a possible race window against
990 * the CPU itself when it comes out of idle, updates the first timer in
991 * the hierarchy and goes back to idle.
992 *
993 * timer base locks are dropped as fast as possible: After checking
994 * whether the remote CPU went offline in the meantime and after
995 * fetching the next remote timer interrupt. Dropping the locks as fast
996 * as possible keeps the locking region small and prevents holding
997 * several (unnecessary) locks during walking the hierarchy for updating
998 * the timerqueue and group events.
999 */
1000 local_irq_disable();
1001 timer_lock_remote_bases(cpu);
1002 raw_spin_lock(&tmc->lock);
1003
1004 /*
1005 * When the CPU went offline in the meantime, no hierarchy walk has to
1006 * be done for updating the queued events, because the walk was
1007 * already done during marking the CPU offline in the hierarchy.
1008 *
1009 * When the CPU is no longer idle, the CPU takes care of the timers and
1010 * also of the timers in the hierarchy.
1011 *
1012 * (See also section "Required event and timerqueue update after a
1013 * remote expiry" in the documentation at the top)
1014 */
1015 if (!tmc->available || !tmc->idle) {
1016 timer_unlock_remote_bases(cpu);
1017 goto unlock;
1018 }
1019
1020 /* next event of CPU */
	fetch_next_timer_interrupt_remote(jif, now, &tevt, cpu);
1022 timer_unlock_remote_bases(cpu);
1023
1024 data.nextexp = tevt.global;
1025 data.firstexp = KTIME_MAX;
1026 data.evt = &tmc->cpuevt;
1027 data.remote = true;
1028
1029 /*
1030 * The update is done even when there is no 'new' global timer pending
1031 * on the remote CPU (see section "Required event and timerqueue update
1032 * after a remote expiry" in the documentation at the top)
1033 */
	walk_groups(&tmigr_new_timer_up, &data, tmc);
1035
1036unlock:
1037 tmc->remote = false;
1038 raw_spin_unlock_irq(&tmc->lock);
1039}
1040
1041static bool tmigr_handle_remote_up(struct tmigr_group *group,
1042 struct tmigr_group *child,
1043 struct tmigr_walk *data)
1044{
1045 struct tmigr_event *evt;
1046 unsigned long jif;
1047 u8 childmask;
1048 u64 now;
1049
1050 jif = data->basej;
1051 now = data->now;
1052
1053 childmask = data->childmask;
1054
1055 trace_tmigr_handle_remote(group);
1056again:
1057 /*
1058 * Handle the group only if @childmask is the migrator or if the
1059 * group has no migrator. Otherwise the group is active and is
1060 * handled by its own migrator.
1061 */
1062 if (!tmigr_check_migrator(group, childmask))
1063 return true;
1064
1065 raw_spin_lock_irq(&group->lock);
1066
1067 evt = tmigr_next_expired_groupevt(group, now);
1068
1069 if (evt) {
1070 unsigned int remote_cpu = evt->cpu;
1071
1072 raw_spin_unlock_irq(&group->lock);
1073
		tmigr_handle_remote_cpu(remote_cpu, now, jif);
1075
1076 /* check if there is another event, that needs to be handled */
1077 goto again;
1078 }
1079
1080 /*
1081 * Keep track of the expiry of the first event that needs to be handled
1082 * (group->next_expiry was updated by tmigr_next_expired_groupevt(),
1083 * next was set by tmigr_handle_remote_cpu()).
1084 */
1085 data->firstexp = group->next_expiry;
1086
1087 raw_spin_unlock_irq(&group->lock);
1088
1089 return false;
1090}
1091
1092/**
1093 * tmigr_handle_remote() - Handle global timers of remote idle CPUs
1094 *
1095 * Called from the timer soft interrupt with interrupts enabled.
1096 */
1097void tmigr_handle_remote(void)
1098{
1099 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1100 struct tmigr_walk data;
1101
1102 if (tmigr_is_not_available(tmc))
1103 return;
1104
1105 data.childmask = tmc->groupmask;
1106 data.firstexp = KTIME_MAX;
1107
1108 /*
1109 * NOTE: This is a doubled check because the migrator test will be done
1110 * in tmigr_handle_remote_up() anyway. Keep this check to speed up the
1111 * return when nothing has to be done.
1112 */
	if (!tmigr_check_migrator(tmc->tmgroup, tmc->groupmask)) {
1114 /*
1115 * If this CPU was an idle migrator, make sure to clear its wakeup
1116 * value so it won't chase timers that have already expired elsewhere.
1117 * This avoids endless requeue from tmigr_new_timer().
1118 */
1119 if (READ_ONCE(tmc->wakeup) == KTIME_MAX)
1120 return;
1121 }
1122
	data.now = get_jiffies_update(&data.basej);
1124
1125 /*
1126 * Update @tmc->wakeup only at the end and do not reset @tmc->wakeup to
1127 * KTIME_MAX. Even if tmc->lock is not held during the whole remote
1128 * handling, tmc->wakeup is fine to be stale as it is called in
1129 * interrupt context and tick_nohz_next_event() is executed in interrupt
1130 * exit path only after processing the last pending interrupt.
1131 */
1132
	__walk_groups(&tmigr_handle_remote_up, &data, tmc);
1134
1135 raw_spin_lock_irq(&tmc->lock);
1136 WRITE_ONCE(tmc->wakeup, data.firstexp);
1137 raw_spin_unlock_irq(&tmc->lock);
1138}
1139
1140static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
1141 struct tmigr_group *child,
1142 struct tmigr_walk *data)
1143{
1144 u8 childmask;
1145
1146 childmask = data->childmask;
1147
1148 /*
1149 * Handle the group only if the child is the migrator or if the group
1150 * has no migrator. Otherwise the group is active and is handled by its
1151 * own migrator.
1152 */
1153 if (!tmigr_check_migrator(group, childmask))
1154 return true;
1155 /*
1156 * The lock is required on 32bit architectures to read the variable
1157 * consistently with a concurrent writer. On 64bit the lock is not
1158 * required because the read operation is not split and so it is always
1159 * consistent.
1160 */
1161 if (IS_ENABLED(CONFIG_64BIT)) {
1162 data->firstexp = READ_ONCE(group->next_expiry);
1163 if (data->now >= data->firstexp) {
1164 data->check = true;
1165 return true;
1166 }
1167 } else {
1168 raw_spin_lock(&group->lock);
1169 data->firstexp = group->next_expiry;
1170 if (data->now >= group->next_expiry) {
1171 data->check = true;
1172 raw_spin_unlock(&group->lock);
1173 return true;
1174 }
1175 raw_spin_unlock(&group->lock);
1176 }
1177
1178 return false;
1179}
1180
1181/**
1182 * tmigr_requires_handle_remote() - Check the need of remote timer handling
1183 *
1184 * Must be called with interrupts disabled.
1185 */
1186bool tmigr_requires_handle_remote(void)
1187{
1188 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1189 struct tmigr_walk data;
1190 unsigned long jif;
1191 bool ret = false;
1192
1193 if (tmigr_is_not_available(tmc))
1194 return ret;
1195
	data.now = get_jiffies_update(&jif);
1197 data.childmask = tmc->groupmask;
1198 data.firstexp = KTIME_MAX;
1199 data.check = false;
1200
1201 /*
1202 * If the CPU is active, walk the hierarchy to check whether a remote
1203 * expiry is required.
1204 *
1205 * Check is done lockless as interrupts are disabled and @tmc->idle is
1206 * set only by the local CPU.
1207 */
1208 if (!tmc->idle) {
		__walk_groups(&tmigr_requires_handle_remote_up, &data, tmc);
1210
1211 return data.check;
1212 }
1213
1214 /*
1215 * When the CPU is idle, compare @tmc->wakeup with @data.now. The lock
1216 * is required on 32bit architectures to read the variable consistently
1217 * with a concurrent writer. On 64bit the lock is not required because
1218 * the read operation is not split and so it is always consistent.
1219 */
1220 if (IS_ENABLED(CONFIG_64BIT)) {
1221 if (data.now >= READ_ONCE(tmc->wakeup))
1222 return true;
1223 } else {
1224 raw_spin_lock(&tmc->lock);
1225 if (data.now >= tmc->wakeup)
1226 ret = true;
1227 raw_spin_unlock(&tmc->lock);
1228 }
1229
1230 return ret;
1231}
1232
1233/**
1234 * tmigr_cpu_new_timer() - enqueue next global timer into hierarchy (idle tmc)
1235 * @nextexp: Next expiry of global timer (or KTIME_MAX if not)
1236 *
1237 * The CPU is already deactivated in the timer migration
1238 * hierarchy. tick_nohz_get_sleep_length() calls tick_nohz_next_event()
1239 * and thereby the timer idle path is executed once more. @tmc->wakeup
1240 * holds the first timer, when the timer migration hierarchy is
1241 * completely idle.
1242 *
1243 * Returns the first timer that needs to be handled by this CPU or KTIME_MAX if
1244 * nothing needs to be done.
1245 */
1246u64 tmigr_cpu_new_timer(u64 nextexp)
1247{
1248 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1249 u64 ret;
1250
1251 if (tmigr_is_not_available(tmc))
1252 return nextexp;
1253
1254 raw_spin_lock(&tmc->lock);
1255
1256 ret = READ_ONCE(tmc->wakeup);
1257 if (nextexp != KTIME_MAX) {
1258 if (nextexp != tmc->cpuevt.nextevt.expires ||
1259 tmc->cpuevt.ignore) {
1260 ret = tmigr_new_timer(tmc, nextexp);
1261 /*
1262 * Make sure the reevaluation of timers in idle path
1263 * will not miss an event.
1264 */
1265 WRITE_ONCE(tmc->wakeup, ret);
1266 }
1267 }
	trace_tmigr_cpu_new_timer_idle(tmc, nextexp);
1269 raw_spin_unlock(&tmc->lock);
1270 return ret;
1271}
1272
1273static bool tmigr_inactive_up(struct tmigr_group *group,
1274 struct tmigr_group *child,
1275 struct tmigr_walk *data)
1276{
1277 union tmigr_state curstate, newstate, childstate;
1278 bool walk_done;
1279 u8 childmask;
1280
1281 childmask = data->childmask;
1282 childstate.state = 0;
1283
1284 /*
1285 * The memory barrier is paired with the cmpxchg() in tmigr_active_up()
1286 * to make sure the updates of child and group states are ordered. The
1287 * ordering is mandatory, as the group state change depends on the child
1288 * state.
1289 */
	curstate.state = atomic_read_acquire(&group->migr_state);
1291
1292 for (;;) {
1293 if (child)
			childstate.state = atomic_read(&child->migr_state);
1295
1296 newstate = curstate;
1297 walk_done = true;
1298
1299 /* Reset active bit when the child is no longer active */
1300 if (!childstate.active)
1301 newstate.active &= ~childmask;
1302
1303 if (newstate.migrator == childmask) {
1304 /*
1305 * Find a new migrator for the group, because the child
1306 * group is idle!
1307 */
1308 if (!childstate.active) {
1309 unsigned long new_migr_bit, active = newstate.active;
1310
				new_migr_bit = find_first_bit(&active, BIT_CNT);
1312
1313 if (new_migr_bit != BIT_CNT) {
1314 newstate.migrator = BIT(new_migr_bit);
1315 } else {
1316 newstate.migrator = TMIGR_NONE;
1317
1318 /* Changes need to be propagated */
1319 walk_done = false;
1320 }
1321 }
1322 }
1323
1324 newstate.seq++;
1325
1326 WARN_ON_ONCE((newstate.migrator != TMIGR_NONE) && !(newstate.active));
1327
		if (atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state)) {
			trace_tmigr_group_set_cpu_inactive(group, newstate, childmask);
1330 break;
1331 }
1332
1333 /*
1334 * The memory barrier is paired with the cmpxchg() in
1335 * tmigr_active_up() to make sure the updates of child and group
1336 * states are ordered. It is required only when the above
1337 * try_cmpxchg() fails.
1338 */
1339 smp_mb__after_atomic();
1340 }
1341
1342 data->remote = false;
1343
1344 /* Event Handling */
1345 tmigr_update_events(group, child, data);
1346
1347 return walk_done;
1348}
1349
1350static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp)
1351{
1352 struct tmigr_walk data = { .nextexp = nextexp,
1353 .firstexp = KTIME_MAX,
1354 .evt = &tmc->cpuevt,
1355 .childmask = tmc->groupmask };
1356
1357 /*
1358 * If nextexp is KTIME_MAX, the CPU event will be ignored because the
1359 * local timer expires before the global timer, no global timer is set
1360 * or CPU goes offline.
1361 */
1362 if (nextexp != KTIME_MAX)
1363 tmc->cpuevt.ignore = false;
1364
	walk_groups(&tmigr_inactive_up, &data, tmc);
1366 return data.firstexp;
1367}
1368
1369/**
1370 * tmigr_cpu_deactivate() - Put current CPU into inactive state
1371 * @nextexp: The next global timer expiry of the current CPU
1372 *
1373 * Must be called with interrupts disabled.
1374 *
1375 * Return: the next event expiry of the current CPU or the next event expiry
1376 * from the hierarchy if this CPU is the top level migrator or the hierarchy is
1377 * completely idle.
1378 */
1379u64 tmigr_cpu_deactivate(u64 nextexp)
1380{
1381 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1382 u64 ret;
1383
1384 if (tmigr_is_not_available(tmc))
1385 return nextexp;
1386
1387 raw_spin_lock(&tmc->lock);
1388
1389 ret = __tmigr_cpu_deactivate(tmc, nextexp);
1390
1391 tmc->idle = true;
1392
1393 /*
1394 * Make sure the reevaluation of timers in idle path will not miss an
1395 * event.
1396 */
1397 WRITE_ONCE(tmc->wakeup, ret);
1398
	trace_tmigr_cpu_idle(tmc, nextexp);
1400 raw_spin_unlock(&tmc->lock);
1401 return ret;
1402}
1403
1404/**
1405 * tmigr_quick_check() - Quick forecast of next tmigr event when CPU wants to
1406 * go idle
1407 * @nextevt: The next global timer expiry of the current CPU
1408 *
1409 * Return:
1410 * * KTIME_MAX - when it is probable that nothing has to be done (not
1411 * the only one in the level 0 group; and if it is the
1412 * only one in level 0 group, but there are more than a
1413 * single group active on the way to top level)
1414 * * nextevt - when CPU is offline and has to handle timer on its own
1415 * or when on the way to top in every group only a single
1416 * child is active but @nextevt is before the lowest
1417 * next_expiry encountered while walking up to top level.
1418 * * next_expiry - value of lowest expiry encountered while walking groups
1419 * if only a single child is active on each and @nextevt
1420 * is after this lowest expiry.
1421 */
1422u64 tmigr_quick_check(u64 nextevt)
1423{
1424 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1425 struct tmigr_group *group = tmc->tmgroup;
1426
1427 if (tmigr_is_not_available(tmc))
1428 return nextevt;
1429
1430 if (WARN_ON_ONCE(tmc->idle))
1431 return nextevt;
1432
	if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->groupmask))
1434 return KTIME_MAX;
1435
1436 do {
1437 if (!tmigr_check_lonely(group))
1438 return KTIME_MAX;
1439
1440 /*
		 * Since the current CPU is active, events may not be sorted
		 * from bottom to top because the CPU's event is ignored
		 * up to the top and its siblings' events are not propagated upwards.
1444 * Thus keep track of the lowest observed expiry.
1445 */
1446 nextevt = min_t(u64, nextevt, READ_ONCE(group->next_expiry));
1447 group = group->parent;
1448 } while (group);
1449
1450 return nextevt;
1451}
1452
1453/*
1454 * tmigr_trigger_active() - trigger a CPU to become active again
1455 *
 * This function is executed on a CPU which is part of cpu_online_mask, when the
 * last active CPU in the hierarchy goes offline. With this, it is ensured that
 * the other CPU is active and takes over the migrator duty.
1459 */
1460static long tmigr_trigger_active(void *unused)
1461{
1462 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1463
1464 WARN_ON_ONCE(!tmc->available || tmc->idle);
1465
1466 return 0;
1467}
1468
1469static int tmigr_clear_cpu_available(unsigned int cpu)
1470{
1471 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1472 int migrator;
1473 u64 firstexp;
1474
	guard(mutex)(&tmigr_available_mutex);

	cpumask_clear_cpu(cpu, tmigr_available_cpumask);
1478 scoped_guard(raw_spinlock_irq, &tmc->lock) {
1479 if (!tmc->available)
1480 return 0;
1481 tmc->available = false;
1482 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
1483
1484 /*
		 * The CPU has to handle its local events on its own when it is
		 * on the way to offline; therefore the nextevt value is set to
		 * KTIME_MAX.
1487 */
1488 firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
1489 trace_tmigr_cpu_unavailable(tmc);
1490 }
1491
1492 if (firstexp != KTIME_MAX) {
1493 migrator = cpumask_any(tmigr_available_cpumask);
1494 work_on_cpu(migrator, tmigr_trigger_active, NULL);
1495 }
1496
1497 return 0;
1498}
1499
1500static int tmigr_set_cpu_available(unsigned int cpu)
1501{
1502 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1503
1504 /* Check whether CPU data was successfully initialized */
1505 if (WARN_ON_ONCE(!tmc->tmgroup))
1506 return -EINVAL;
1507
1508 if (tmigr_is_isolated(cpu))
1509 return 0;
1510
	guard(mutex)(&tmigr_available_mutex);

	cpumask_set_cpu(cpu, tmigr_available_cpumask);
1514 scoped_guard(raw_spinlock_irq, &tmc->lock) {
1515 if (tmc->available)
1516 return 0;
1517 trace_tmigr_cpu_available(tmc);
1518 tmc->idle = timer_base_is_idle();
1519 if (!tmc->idle)
1520 __tmigr_cpu_activate(tmc);
1521 tmc->available = true;
1522 }
1523 return 0;
1524}
1525
1526static void tmigr_cpu_isolate(struct work_struct *ignored)
1527{
1528 tmigr_clear_cpu_available(smp_processor_id());
1529}
1530
1531static void tmigr_cpu_unisolate(struct work_struct *ignored)
1532{
1533 tmigr_set_cpu_available(smp_processor_id());
1534}
1535
1536/**
1537 * tmigr_isolated_exclude_cpumask - Exclude given CPUs from hierarchy
1538 * @exclude_cpumask: the cpumask to be excluded from timer migration hierarchy
1539 *
1540 * This function can be called from cpuset code to provide the new set of
1541 * isolated CPUs that should be excluded from the hierarchy.
1542 * Online CPUs not present in exclude_cpumask but already excluded are brought
1543 * back to the hierarchy.
1544 * Functions to isolate/unisolate need to be called locally and can sleep.
1545 */
1546int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
1547{
1548 struct work_struct __percpu *works __free(free_percpu) =
1549 alloc_percpu(struct work_struct);
1550 cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL;
1551 int cpu;
1552
1553 lockdep_assert_cpus_held();
1554
1555 if (!works)
1556 return -ENOMEM;
	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
1558 return -ENOMEM;
1559
1560 /*
1561 * First set previously isolated CPUs as available (unisolate).
1562 * This cpumask contains only CPUs that switched to available now.
1563 */
	cpumask_andnot(cpumask, cpu_online_mask, exclude_cpumask);
	cpumask_andnot(cpumask, cpumask, tmigr_available_cpumask);
1566
1567 for_each_cpu(cpu, cpumask) {
1568 struct work_struct *work = per_cpu_ptr(works, cpu);
1569
1570 INIT_WORK(work, tmigr_cpu_unisolate);
1571 schedule_work_on(cpu, work);
1572 }
1573 for_each_cpu(cpu, cpumask)
1574 flush_work(per_cpu_ptr(works, cpu));
1575
1576 /*
1577 * Then clear previously available CPUs (isolate).
1578 * This cpumask contains only CPUs that switched to not available now.
1579 * There cannot be overlap with the newly available ones.
1580 */
	cpumask_and(cpumask, exclude_cpumask, tmigr_available_cpumask);
	cpumask_and(cpumask, cpumask, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE));
1583 /*
1584 * Handle this here and not in the cpuset code because exclude_cpumask
1585 * might include also the tick CPU if included in isolcpus.
1586 */
1587 for_each_cpu(cpu, cpumask) {
1588 if (!tick_nohz_cpu_hotpluggable(cpu)) {
			cpumask_clear_cpu(cpu, cpumask);
1590 break;
1591 }
1592 }
1593
1594 for_each_cpu(cpu, cpumask) {
1595 struct work_struct *work = per_cpu_ptr(works, cpu);
1596
1597 INIT_WORK(work, tmigr_cpu_isolate);
1598 schedule_work_on(cpu, work);
1599 }
1600 for_each_cpu(cpu, cpumask)
1601 flush_work(per_cpu_ptr(works, cpu));
1602
1603 return 0;
1604}
1605
1606static int __init tmigr_init_isolation(void)
1607{
1608 cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL;
1609
1610 static_branch_enable(&tmigr_exclude_isolated);
1611
	if (!housekeeping_enabled(HK_TYPE_DOMAIN))
		return 0;
	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_andnot(cpumask, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN));
1618
1619 /* Protect against RCU torture hotplug testing */
1620 guard(cpus_read_lock)();
	return tmigr_isolated_exclude_cpumask(cpumask);
1622}
1623late_initcall(tmigr_init_isolation);
1624
1625static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
1626 int node)
1627{
1628 union tmigr_state s;
1629
1630 raw_spin_lock_init(&group->lock);
1631
1632 group->level = lvl;
1633 group->numa_node = lvl < tmigr_crossnode_level ? node : NUMA_NO_NODE;
1634
1635 group->num_children = 0;
1636
1637 s.migrator = TMIGR_NONE;
1638 s.active = 0;
1639 s.seq = 0;
	atomic_set(&group->migr_state, s.state);

	timerqueue_init_head(&group->events);
	timerqueue_init(&group->groupevt.nextevt);
1644 group->groupevt.nextevt.expires = KTIME_MAX;
1645 WRITE_ONCE(group->next_expiry, KTIME_MAX);
1646 group->groupevt.ignore = true;
1647}
1648
1649static struct tmigr_group *tmigr_get_group(int node, unsigned int lvl)
1650{
1651 struct tmigr_group *tmp, *group = NULL;
1652
1653 lockdep_assert_held(&tmigr_mutex);
1654
1655 /* Try to attach to an existing group first */
1656 list_for_each_entry(tmp, &tmigr_level_list[lvl], list) {
1657 /*
1658 * If @lvl is below the cross NUMA node level, check whether
1659 * this group belongs to the same NUMA node.
1660 */
1661 if (lvl < tmigr_crossnode_level && tmp->numa_node != node)
1662 continue;
1663
1664 /* Capacity left? */
1665 if (tmp->num_children >= TMIGR_CHILDREN_PER_GROUP)
1666 continue;
1667
1668 /*
1669 * TODO: A possible further improvement: Make sure that all CPU
1670 * siblings end up in the same group of the lowest level of the
		 * hierarchy. Relying on the topology sibling mask would be a
1672 * reasonable solution.
1673 */
1674
1675 group = tmp;
1676 break;
1677 }
1678
1679 if (group)
1680 return group;
1681
1682 /* Allocate and set up a new group */
1683 group = kzalloc_node(sizeof(*group), GFP_KERNEL, node);
1684 if (!group)
		return ERR_PTR(-ENOMEM);

	tmigr_init_group(group, lvl, node);

	/* Setup successful. Add it to the hierarchy */
	list_add(&group->list, &tmigr_level_list[lvl]);
1691 trace_tmigr_group_set(group);
1692 return group;
1693}
1694
static bool tmigr_init_root(struct tmigr_group *group, bool activate)
{
	if (!group->parent && group != tmigr_root) {
		/*
		 * This is the new top-level, prepare its groupmask in advance
		 * to avoid accidents where yet another new top-level is
		 * created in the future and made visible before this groupmask.
		 */
		group->groupmask = BIT(0);
		WARN_ON_ONCE(activate);

		return true;
	}

	return false;
}
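
/*
 * Connect @child to @parent: assign the child's groupmask bit within the
 * parent and publish the parent pointer with a release barrier, so that
 * racing idle/active walkers only ever see a fully initialized parent.
 */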
static void tmigr_connect_child_parent(struct tmigr_group *child,
				       struct tmigr_group *parent,
				       bool activate)
{
	if (tmigr_init_root(parent, activate)) {
		/*
		 * The previous top level has already prepared its groupmask,
		 * so simply account it in advance as the first child. If some
		 * groups have been created between the old and the new root
		 * due to a node mismatch, the new root's child will be
		 * initialized accordingly.
		 */
		parent->num_children = 1;
	}

	/* Connecting the old root to the new root? */
	if (!parent->parent && activate) {
		/*
		 * @child is the old top or, in case of a node mismatch, some
		 * intermediate group between the old top and the new one in
		 * @parent. In either case @child has been pre-accounted above
		 * as the first child. Its new inactive sibling corresponding
		 * to the CPU coming up has been accounted as the second child.
		 */
		WARN_ON_ONCE(parent->num_children != 2);
		child->groupmask = BIT(0);
	} else {
		/* Common case: adding @child for the CPU coming up to @parent. */
		child->groupmask = BIT(parent->num_children++);
	}

	/*
	 * Make sure parent initialization is visible before publishing it to a
	 * racing CPU entering/exiting idle. This RELEASE barrier enforces an
	 * address dependency that pairs with the READ_ONCE() in __walk_groups().
	 */
	smp_store_release(&child->parent, parent);

	trace_tmigr_connect_child_parent(child);
}
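
/*
 * Build the hierarchy levels required for @node bottom up, starting above
 * @start if set (used to stack a new root on top of the old one), connect
 * the groups top down and, when @activate is set, propagate the active
 * state of @start up to the new top level.
 */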
static int tmigr_setup_groups(unsigned int cpu, unsigned int node,
			      struct tmigr_group *start, bool activate)
{
	struct tmigr_group *group, *child, **stack;
	int i, top = 0, err = 0, start_lvl = 0;
	bool root_mismatch = false;

	stack = kcalloc(tmigr_hierarchy_levels, sizeof(*stack), GFP_KERNEL);
	if (!stack)
		return -ENOMEM;

	if (start) {
		stack[start->level] = start;
		start_lvl = start->level + 1;
	}

	if (tmigr_root)
		root_mismatch = tmigr_root->numa_node != node;

	for (i = start_lvl; i < tmigr_hierarchy_levels; i++) {
		group = tmigr_get_group(node, i);
		if (IS_ERR(group)) {
			err = PTR_ERR(group);
			i--;
			break;
		}

		top = i;
		stack[i] = group;

		/*
		 * When fewer CPUs are brought up than are possible in the
		 * system, not all calculated hierarchy levels are required,
		 * unless a node mismatch is detected.
		 *
		 * The loop is aborted as soon as the highest level, which
		 * might be different from tmigr_hierarchy_levels, contains
		 * only a single group, unless the nodes mismatch below
		 * tmigr_crossnode_level.
		 */
		if (group->parent)
			break;
		if ((!root_mismatch || i >= tmigr_crossnode_level) &&
		    list_is_singular(&tmigr_level_list[i]))
			break;
	}

	/* Assert single root without parent */
	if (WARN_ON_ONCE(i >= tmigr_hierarchy_levels))
		return -EINVAL;

	for (; i >= start_lvl; i--) {
		group = stack[i];

		if (err < 0) {
			list_del(&group->list);
			kfree(group);
			continue;
		}

		WARN_ON_ONCE(i != group->level);

		/*
		 * Update tmc -> group / child -> group connection
		 */
		if (i == 0) {
			struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);

			tmc->tmgroup = group;
			tmc->groupmask = BIT(group->num_children++);

			tmigr_init_root(group, activate);

			trace_tmigr_connect_cpu_parent(tmc);

			/* There are no children that need to be connected */
			continue;
		} else {
			child = stack[i - 1];
			tmigr_connect_child_parent(child, group, activate);
		}
	}

	if (err < 0)
		goto out;

	if (activate) {
		struct tmigr_walk data;
		union tmigr_state state;

		/*
		 * To prevent inconsistent states, active children need to be
		 * active in the new parent as well. Inactive children are
		 * already marked inactive in the parent group:
		 *
		 * * When new groups were created by tmigr_setup_groups()
		 *   starting from the lowest level, then they are not active.
		 *   They will be set active when the newly onlined CPU becomes
		 *   active.
		 *
		 * * But if new groups above the current top level are required,
		 *   it is mandatory to propagate the active state of the
		 *   already existing child to the new parents. So
		 *   tmigr_active_up() activates the new parents while walking
		 *   up from the old root to the new one.
		 *
		 * * It is ensured that @start is active, as this setup path is
		 *   executed in the hotplug prepare callback. This is executed
		 *   by an already connected and !idle CPU. Even if all other
		 *   CPUs go idle, the CPU executing the setup will be
		 *   responsible up to the current top level group. And the next
		 *   time it goes inactive, it will release the new childmask
		 *   and parent to subsequent walkers through this @child.
		 *   Therefore propagate the active state unconditionally.
		 */
		state.state = atomic_read(&start->migr_state);
		WARN_ON_ONCE(!state.active);
		WARN_ON_ONCE(!start->parent);
		data.childmask = start->groupmask;
		__walk_groups_from(tmigr_active_up, &data, start, start->parent);
	}

	/* Root update */
	if (list_is_singular(&tmigr_level_list[top])) {
		group = list_first_entry(&tmigr_level_list[top],
					 typeof(*group), list);
		WARN_ON_ONCE(group->parent);
		if (tmigr_root) {
			/* Old root should be the same or below */
			WARN_ON_ONCE(tmigr_root->level > top);
		}
		tmigr_root = group;
	}
out:
	kfree(stack);

	return err;
}
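
/*
 * Hook @cpu into the hierarchy under tmigr_mutex, creating any missing
 * groups. If this grew the hierarchy beyond the previous top level,
 * connect the old root to the new one and propagate its active state
 * upwards.
 */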
static int tmigr_add_cpu(unsigned int cpu)
{
	struct tmigr_group *old_root = tmigr_root;
	int node = cpu_to_node(cpu);
	int ret;

	guard(mutex)(&tmigr_mutex);

	ret = tmigr_setup_groups(cpu, node, NULL, false);

	/* Root has changed? Connect the old one to the new one */
	if (ret >= 0 && old_root && old_root != tmigr_root) {
		/*
		 * The target CPU must never do the prepare work, except
		 * on early boot when the boot CPU is the target. Otherwise
		 * it may spuriously activate the old top level group inside
		 * the new one (regardless of whether the old top level group
		 * is active or not) and/or release an uninitialized childmask.
		 */
		WARN_ON_ONCE(cpu == raw_smp_processor_id());
		/*
		 * The current CPU is expected to be available in the hierarchy,
		 * otherwise the old root may not be active as expected.
		 */
		WARN_ON_ONCE(!per_cpu_ptr(&tmigr_cpu, raw_smp_processor_id())->available);
		ret = tmigr_setup_groups(-1, old_root->numa_node, old_root, true);
	}

	return ret;
}
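
/*
 * CPU hotplug prepare callback (CPUHP_TMIGR_PREPARE): initialize the per
 * CPU tmigr_cpu state on the first online attempt and add the CPU to the
 * hierarchy.
 */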
static int tmigr_cpu_prepare(unsigned int cpu)
{
	struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
	int ret = 0;

	/* Not first online attempt? */
	if (tmc->tmgroup)
		return ret;

	raw_spin_lock_init(&tmc->lock);
	timerqueue_init(&tmc->cpuevt.nextevt);
	tmc->cpuevt.nextevt.expires = KTIME_MAX;
	tmc->cpuevt.ignore = true;
	tmc->cpuevt.cpu = cpu;
	tmc->remote = false;
	WRITE_ONCE(tmc->wakeup, KTIME_MAX);

	ret = tmigr_add_cpu(cpu);
	if (ret < 0)
		return ret;

	if (tmc->groupmask == 0)
		return -EINVAL;

	return ret;
}
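
/*
 * Early initcall: size the hierarchy from the number of possible CPUs and
 * NUMA nodes and register the hotplug states which hook CPUs into it.
 *
 * Worked example (assumed topology, for illustration only): 64 possible
 * CPUs spread over 2 nodes with TMIGR_CHILDREN_PER_GROUP == 8 yields
 * cpus_per_node = 32, cpulvl = DIV_ROUND_UP(order_base_2(32), ilog2(8)) = 2
 * and nodelvl = DIV_ROUND_UP(order_base_2(2), ilog2(8)) = 1, i.e. three
 * hierarchy levels with the crossnode level at 2.
 */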
static int __init tmigr_init(void)
{
	unsigned int cpulvl, nodelvl, cpus_per_node, i;
	unsigned int nnodes = num_possible_nodes();
	unsigned int ncpus = num_possible_cpus();
	int ret = -ENOMEM;

	BUILD_BUG_ON_NOT_POWER_OF_2(TMIGR_CHILDREN_PER_GROUP);

	/* Nothing to do if running on UP */
	if (ncpus == 1)
		return 0;

	if (!zalloc_cpumask_var(&tmigr_available_cpumask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * Calculate the required hierarchy levels. Unfortunately there is no
	 * reliable information available, unless all possible CPUs have been
	 * brought up and all NUMA nodes are populated.
	 *
	 * Estimate the number of levels with the number of possible nodes and
	 * the number of possible CPUs. Assume CPUs are spread evenly across
	 * nodes. We cannot rely on cpumask_of_node() because it only works for
	 * online CPUs.
	 */
	cpus_per_node = DIV_ROUND_UP(ncpus, nnodes);

	/* Calc the hierarchy levels required to hold the CPUs of a node */
	cpulvl = DIV_ROUND_UP(order_base_2(cpus_per_node),
			      ilog2(TMIGR_CHILDREN_PER_GROUP));

	/* Calculate the extra levels to connect all nodes */
	nodelvl = DIV_ROUND_UP(order_base_2(nnodes),
			       ilog2(TMIGR_CHILDREN_PER_GROUP));

	tmigr_hierarchy_levels = cpulvl + nodelvl;

	/*
	 * If a NUMA node spawns more than one CPU level group then the next
	 * level(s) of the hierarchy contains groups which handle all CPU groups
	 * of the same NUMA node. The level above goes across NUMA nodes. Store
	 * this information for the setup code to decide in which level node
	 * matching is no longer required.
	 */
	tmigr_crossnode_level = cpulvl;

	tmigr_level_list = kcalloc(tmigr_hierarchy_levels, sizeof(struct list_head), GFP_KERNEL);
	if (!tmigr_level_list)
		goto err;

	for (i = 0; i < tmigr_hierarchy_levels; i++)
		INIT_LIST_HEAD(&tmigr_level_list[i]);

	pr_info("Timer migration: %d hierarchy levels; %d children per group;"
		" %d crossnode level\n",
		tmigr_hierarchy_levels, TMIGR_CHILDREN_PER_GROUP,
		tmigr_crossnode_level);

	ret = cpuhp_setup_state(CPUHP_TMIGR_PREPARE, "tmigr:prepare",
				tmigr_cpu_prepare, NULL);
	if (ret)
		goto err;

	ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
				tmigr_set_cpu_available, tmigr_clear_cpu_available);
	if (ret)
		goto err;

	return 0;

err:
	pr_err("Timer migration setup failed\n");
	return ret;
}
early_initcall(tmigr_init);