Skip to content

Commit 040b9d7

Browse files
committed
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Three fixes:

   - fix a suspend/resume cpusets bug

   - fix a !CONFIG_NUMA_BALANCING bug

   - fix a kerneldoc warning"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix nuisance kernel-doc warning
  sched/cpuset/pm: Fix cpuset vs. suspend-resume bugs
  sched/fair: Fix wake_affine_llc() balancing rules
2 parents e6328a7 + 4612335 commit 040b9d7

File tree

5 files changed

+30
-8
lines changed

5 files changed

+30
-8
lines changed

include/linux/cpuset.h

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -51,7 +51,9 @@ static inline void cpuset_dec(void)
5151

5252
extern int cpuset_init(void);
5353
extern void cpuset_init_smp(void);
54+
extern void cpuset_force_rebuild(void);
5455
extern void cpuset_update_active_cpus(void);
56+
extern void cpuset_wait_for_hotplug(void);
5557
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
5658
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
5759
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -164,11 +166,15 @@ static inline bool cpusets_enabled(void) { return false; }
164166
static inline int cpuset_init(void) { return 0; }
165167
static inline void cpuset_init_smp(void) {}
166168

169+
static inline void cpuset_force_rebuild(void) { }
170+
167171
static inline void cpuset_update_active_cpus(void)
168172
{
169173
partition_sched_domains(1, NULL, NULL);
170174
}
171175

176+
static inline void cpuset_wait_for_hotplug(void) { }
177+
172178
static inline void cpuset_cpus_allowed(struct task_struct *p,
173179
struct cpumask *mask)
174180
{

kernel/cgroup/cpuset.c

Lines changed: 15 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2275,6 +2275,13 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs)
22752275
mutex_unlock(&cpuset_mutex);
22762276
}
22772277

2278+
static bool force_rebuild;
2279+
2280+
void cpuset_force_rebuild(void)
2281+
{
2282+
force_rebuild = true;
2283+
}
2284+
22782285
/**
22792286
* cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
22802287
*
@@ -2349,8 +2356,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
23492356
}
23502357

23512358
/* rebuild sched domains if cpus_allowed has changed */
2352-
if (cpus_updated)
2359+
if (cpus_updated || force_rebuild) {
2360+
force_rebuild = false;
23532361
rebuild_sched_domains();
2362+
}
23542363
}
23552364

23562365
void cpuset_update_active_cpus(void)
@@ -2363,6 +2372,11 @@ void cpuset_update_active_cpus(void)
23632372
schedule_work(&cpuset_hotplug_work);
23642373
}
23652374

2375+
void cpuset_wait_for_hotplug(void)
2376+
{
2377+
flush_work(&cpuset_hotplug_work);
2378+
}
2379+
23662380
/*
23672381
* Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
23682382
* Call this routine anytime after node_states[N_MEMORY] changes.

kernel/power/process.c

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -20,8 +20,9 @@
2020
#include <linux/workqueue.h>
2121
#include <linux/kmod.h>
2222
#include <trace/events/power.h>
23+
#include <linux/cpuset.h>
2324

24-
/*
25+
/*
2526
* Timeout for stopping processes
2627
*/
2728
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
@@ -202,6 +203,8 @@ void thaw_processes(void)
202203
__usermodehelper_set_disable_depth(UMH_FREEZING);
203204
thaw_workqueues();
204205

206+
cpuset_wait_for_hotplug();
207+
205208
read_lock(&tasklist_lock);
206209
for_each_process_thread(g, p) {
207210
/* No other threads should have PF_SUSPEND_TASK set */

kernel/sched/core.c

Lines changed: 3 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -5556,16 +5556,15 @@ static void cpuset_cpu_active(void)
55565556
* operation in the resume sequence, just build a single sched
55575557
* domain, ignoring cpusets.
55585558
*/
5559-
num_cpus_frozen--;
5560-
if (likely(num_cpus_frozen)) {
5561-
partition_sched_domains(1, NULL, NULL);
5559+
partition_sched_domains(1, NULL, NULL);
5560+
if (--num_cpus_frozen)
55625561
return;
5563-
}
55645562
/*
55655563
* This is the last CPU online operation. So fall through and
55665564
* restore the original sched domains by considering the
55675565
* cpuset configurations.
55685566
*/
5567+
cpuset_force_rebuild();
55695568
}
55705569
cpuset_update_active_cpus();
55715570
}

kernel/sched/fair.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -5424,7 +5424,7 @@ wake_affine_llc(struct sched_domain *sd, struct task_struct *p,
54245424
return false;
54255425

54265426
/* if this cache has capacity, come here */
5427-
if (this_stats.has_capacity && this_stats.nr_running < prev_stats.nr_running+1)
5427+
if (this_stats.has_capacity && this_stats.nr_running+1 < prev_stats.nr_running)
54285428
return true;
54295429

54305430
/*
@@ -7708,7 +7708,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
77087708
* number.
77097709
*
77107710
* Return: 1 when packing is required and a task should be moved to
7711-
* this CPU. The amount of the imbalance is returned in *imbalance.
7711+
* this CPU. The amount of the imbalance is returned in env->imbalance.
77127712
*
77137713
* @env: The load balancing environment.
77147714
* @sds: Statistics of the sched_domain which is to be packed

0 commit comments

Comments
 (0)