Skip to content

Commit ec846ec

Browse files
committed
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar: "Three CPU hotplug related fixes and a debugging improvement" * 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: sched/debug: Add debugfs knob for "sched_debug" sched/core: WARN() when migrating to an offline CPU sched/fair: Plug hole between hotplug and active_load_balance() sched/fair: Avoid newidle balance for !active CPUs
2 parents b5df1b3 + 9469eb0 commit ec846ec

5 files changed

Lines changed: 25 additions & 3 deletions

File tree

kernel/sched/core.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1173,6 +1173,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
11731173
WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
11741174
lockdep_is_held(&task_rq(p)->lock)));
11751175
#endif
1176+
/*
1177+
* Clearly, migrating tasks to offline CPUs is a fairly daft thing.
1178+
*/
1179+
WARN_ON_ONCE(!cpu_online(new_cpu));
11761180
#endif
11771181

11781182
trace_sched_migrate_task(p, new_cpu);

kernel/sched/debug.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -181,11 +181,16 @@ static const struct file_operations sched_feat_fops = {
181181
.release = single_release,
182182
};
183183

184+
__read_mostly bool sched_debug_enabled;
185+
184186
static __init int sched_init_debug(void)
185187
{
186188
debugfs_create_file("sched_features", 0644, NULL, NULL,
187189
&sched_feat_fops);
188190

191+
debugfs_create_bool("sched_debug", 0644, NULL,
192+
&sched_debug_enabled);
193+
189194
return 0;
190195
}
191196
late_initcall(sched_init_debug);

kernel/sched/fair.c

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8436,6 +8436,12 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
84368436
*/
84378437
this_rq->idle_stamp = rq_clock(this_rq);
84388438

8439+
/*
8440+
* Do not pull tasks towards !active CPUs...
8441+
*/
8442+
if (!cpu_active(this_cpu))
8443+
return 0;
8444+
84398445
/*
84408446
* This is OK, because current is on_cpu, which avoids it being picked
84418447
* for load-balance and preemption/IRQs are still disabled avoiding
@@ -8543,6 +8549,13 @@ static int active_load_balance_cpu_stop(void *data)
85438549
struct rq_flags rf;
85448550

85458551
rq_lock_irq(busiest_rq, &rf);
8552+
/*
8553+
* Between queueing the stop-work and running it is a hole in which
8554+
* CPUs can become inactive. We should not move tasks from or to
8555+
* inactive CPUs.
8556+
*/
8557+
if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
8558+
goto out_unlock;
85468559

85478560
/* make sure the requested cpu hasn't gone down in the meantime */
85488561
if (unlikely(busiest_cpu != smp_processor_id() ||

kernel/sched/sched.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1951,6 +1951,8 @@ extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
19511951
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
19521952

19531953
#ifdef CONFIG_SCHED_DEBUG
1954+
extern bool sched_debug_enabled;
1955+
19541956
extern void print_cfs_stats(struct seq_file *m, int cpu);
19551957
extern void print_rt_stats(struct seq_file *m, int cpu);
19561958
extern void print_dl_stats(struct seq_file *m, int cpu);

kernel/sched/topology.c

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,11 +14,9 @@ cpumask_var_t sched_domains_tmpmask2;
1414

1515
#ifdef CONFIG_SCHED_DEBUG
1616

17-
static __read_mostly int sched_debug_enabled;
18-
1917
static int __init sched_debug_setup(char *str)
2018
{
21-
sched_debug_enabled = 1;
19+
sched_debug_enabled = true;
2220

2321
return 0;
2422
}

0 commit comments

Comments (0)