Skip to content

Commit 45554b2

Browse files
committed
Merge tag 'trace-v4.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull another tracing update from Steven Rostedt: "Commit 79c6f44 ("tracing: Fix hwlat kthread migration") fixed a bug that was caused by a race condition in initializing the hwlat thread. When fixing this code, I realized that it should have been done differently. Instead of doing the rewrite and sending that to stable, I just sent the above commit to fix the bug that should be back ported. This commit is on top of the quick fix commit to rewrite the code the way it should have been written in the first place" * tag 'trace-v4.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: tracing: Clean up the hwlat binding code
2 parents 79b17ea + f447c19 commit 45554b2

1 file changed

Lines changed: 17 additions & 17 deletions

File tree

kernel/trace/trace_hwlat.c

Lines changed: 17 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -266,24 +266,13 @@ static int get_sample(void)
266266
static struct cpumask save_cpumask;
267267
static bool disable_migrate;
268268

269-
static void move_to_next_cpu(bool initmask)
269+
static void move_to_next_cpu(void)
270270
{
271-
static struct cpumask *current_mask;
271+
struct cpumask *current_mask = &save_cpumask;
272272
int next_cpu;
273273

274274
if (disable_migrate)
275275
return;
276-
277-
/* Just pick the first CPU on first iteration */
278-
if (initmask) {
279-
current_mask = &save_cpumask;
280-
get_online_cpus();
281-
cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
282-
put_online_cpus();
283-
next_cpu = cpumask_first(current_mask);
284-
goto set_affinity;
285-
}
286-
287276
/*
288277
* If for some reason the user modifies the CPU affinity
289278
* of this thread, than stop migrating for the duration
@@ -300,7 +289,6 @@ static void move_to_next_cpu(bool initmask)
300289
if (next_cpu >= nr_cpu_ids)
301290
next_cpu = cpumask_first(current_mask);
302291

303-
set_affinity:
304292
if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
305293
goto disable;
306294

@@ -327,12 +315,10 @@ static void move_to_next_cpu(bool initmask)
327315
static int kthread_fn(void *data)
328316
{
329317
u64 interval;
330-
bool initmask = true;
331318

332319
while (!kthread_should_stop()) {
333320

334-
move_to_next_cpu(initmask);
335-
initmask = false;
321+
move_to_next_cpu();
336322

337323
local_irq_disable();
338324
get_sample();
@@ -363,13 +349,27 @@ static int kthread_fn(void *data)
363349
*/
364350
static int start_kthread(struct trace_array *tr)
365351
{
352+
struct cpumask *current_mask = &save_cpumask;
366353
struct task_struct *kthread;
354+
int next_cpu;
355+
356+
/* Just pick the first CPU on first iteration */
357+
current_mask = &save_cpumask;
358+
get_online_cpus();
359+
cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
360+
put_online_cpus();
361+
next_cpu = cpumask_first(current_mask);
367362

368363
kthread = kthread_create(kthread_fn, NULL, "hwlatd");
369364
if (IS_ERR(kthread)) {
370365
pr_err(BANNER "could not start sampling thread\n");
371366
return -ENOMEM;
372367
}
368+
369+
cpumask_clear(current_mask);
370+
cpumask_set_cpu(next_cpu, current_mask);
371+
sched_setaffinity(kthread->pid, current_mask);
372+
373373
hwlat_kthread = kthread;
374374
wake_up_process(kthread);
375375

0 commit comments

Comments (0)