Skip to content

Commit 2161573

Browse files
Davidlohr Bueso authored and torvalds committed
sched/deadline: replace earliest dl and rq leftmost caching
... with the generic rbtree flavor instead. No changes in semantics whatsoever. Link: http://lkml.kernel.org/r/20170719014603.19029-9-dave@stgolabs.net Signed-off-by: Davidlohr Bueso <dbueso@suse.de> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent bfb0688 commit 2161573

2 files changed

Lines changed: 21 additions & 35 deletions

File tree

kernel/sched/deadline.c

Lines changed: 19 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -296,7 +296,7 @@ static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
296296
{
297297
struct sched_dl_entity *dl_se = &p->dl;
298298

299-
return dl_rq->rb_leftmost == &dl_se->rb_node;
299+
return dl_rq->root.rb_leftmost == &dl_se->rb_node;
300300
}
301301

302302
void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
@@ -320,15 +320,15 @@ void init_dl_bw(struct dl_bw *dl_b)
320320

321321
void init_dl_rq(struct dl_rq *dl_rq)
322322
{
323-
dl_rq->rb_root = RB_ROOT;
323+
dl_rq->root = RB_ROOT_CACHED;
324324

325325
#ifdef CONFIG_SMP
326326
/* zero means no -deadline tasks */
327327
dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
328328

329329
dl_rq->dl_nr_migratory = 0;
330330
dl_rq->overloaded = 0;
331-
dl_rq->pushable_dl_tasks_root = RB_ROOT;
331+
dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
332332
#else
333333
init_dl_bw(&dl_rq->dl_bw);
334334
#endif
@@ -410,10 +410,10 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
410410
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
411411
{
412412
struct dl_rq *dl_rq = &rq->dl;
413-
struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
413+
struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
414414
struct rb_node *parent = NULL;
415415
struct task_struct *entry;
416-
int leftmost = 1;
416+
bool leftmost = true;
417417

418418
BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
419419

@@ -425,17 +425,16 @@ static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
425425
link = &parent->rb_left;
426426
else {
427427
link = &parent->rb_right;
428-
leftmost = 0;
428+
leftmost = false;
429429
}
430430
}
431431

432-
if (leftmost) {
433-
dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
432+
if (leftmost)
434433
dl_rq->earliest_dl.next = p->dl.deadline;
435-
}
436434

437435
rb_link_node(&p->pushable_dl_tasks, parent, link);
438-
rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
436+
rb_insert_color_cached(&p->pushable_dl_tasks,
437+
&dl_rq->pushable_dl_tasks_root, leftmost);
439438
}
440439

441440
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
@@ -445,24 +444,23 @@ static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
445444
if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
446445
return;
447446

448-
if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
447+
if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
449448
struct rb_node *next_node;
450449

451450
next_node = rb_next(&p->pushable_dl_tasks);
452-
dl_rq->pushable_dl_tasks_leftmost = next_node;
453451
if (next_node) {
454452
dl_rq->earliest_dl.next = rb_entry(next_node,
455453
struct task_struct, pushable_dl_tasks)->dl.deadline;
456454
}
457455
}
458456

459-
rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
457+
rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
460458
RB_CLEAR_NODE(&p->pushable_dl_tasks);
461459
}
462460

463461
static inline int has_pushable_dl_tasks(struct rq *rq)
464462
{
465-
return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
463+
return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
466464
}
467465

468466
static int push_dl_task(struct rq *rq);
@@ -1266,7 +1264,7 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
12661264
dl_rq->earliest_dl.next = 0;
12671265
cpudl_clear(&rq->rd->cpudl, rq->cpu);
12681266
} else {
1269-
struct rb_node *leftmost = dl_rq->rb_leftmost;
1267+
struct rb_node *leftmost = dl_rq->root.rb_leftmost;
12701268
struct sched_dl_entity *entry;
12711269

12721270
entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
@@ -1313,7 +1311,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
13131311
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
13141312
{
13151313
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1316-
struct rb_node **link = &dl_rq->rb_root.rb_node;
1314+
struct rb_node **link = &dl_rq->root.rb_root.rb_node;
13171315
struct rb_node *parent = NULL;
13181316
struct sched_dl_entity *entry;
13191317
int leftmost = 1;
@@ -1331,11 +1329,8 @@ static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
13311329
}
13321330
}
13331331

1334-
if (leftmost)
1335-
dl_rq->rb_leftmost = &dl_se->rb_node;
1336-
13371332
rb_link_node(&dl_se->rb_node, parent, link);
1338-
rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
1333+
rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
13391334

13401335
inc_dl_tasks(dl_se, dl_rq);
13411336
}
@@ -1347,14 +1342,7 @@ static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
13471342
if (RB_EMPTY_NODE(&dl_se->rb_node))
13481343
return;
13491344

1350-
if (dl_rq->rb_leftmost == &dl_se->rb_node) {
1351-
struct rb_node *next_node;
1352-
1353-
next_node = rb_next(&dl_se->rb_node);
1354-
dl_rq->rb_leftmost = next_node;
1355-
}
1356-
1357-
rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
1345+
rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
13581346
RB_CLEAR_NODE(&dl_se->rb_node);
13591347

13601348
dec_dl_tasks(dl_se, dl_rq);
@@ -1647,7 +1635,7 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
16471635
static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
16481636
struct dl_rq *dl_rq)
16491637
{
1650-
struct rb_node *left = dl_rq->rb_leftmost;
1638+
struct rb_node *left = rb_first_cached(&dl_rq->root);
16511639

16521640
if (!left)
16531641
return NULL;
@@ -1771,7 +1759,7 @@ static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
17711759
*/
17721760
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
17731761
{
1774-
struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1762+
struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
17751763
struct task_struct *p = NULL;
17761764

17771765
if (!has_pushable_dl_tasks(rq))
@@ -1945,7 +1933,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
19451933
if (!has_pushable_dl_tasks(rq))
19461934
return NULL;
19471935

1948-
p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1936+
p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
19491937
struct task_struct, pushable_dl_tasks);
19501938

19511939
BUG_ON(rq->cpu != task_cpu(p));

kernel/sched/sched.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -549,8 +549,7 @@ struct rt_rq {
549549
/* Deadline class' related fields in a runqueue */
550550
struct dl_rq {
551551
/* runqueue is an rbtree, ordered by deadline */
552-
struct rb_root rb_root;
553-
struct rb_node *rb_leftmost;
552+
struct rb_root_cached root;
554553

555554
unsigned long dl_nr_running;
556555

@@ -574,8 +573,7 @@ struct dl_rq {
574573
* an rb-tree, ordered by tasks' deadlines, with caching
575574
* of the leftmost (earliest deadline) element.
576575
*/
577-
struct rb_root pushable_dl_tasks_root;
578-
struct rb_node *pushable_dl_tasks_leftmost;
576+
struct rb_root_cached pushable_dl_tasks_root;
579577
#else
580578
struct dl_bw dl_bw;
581579
#endif

0 commit comments

Comments
 (0)