Branch data Line data Source code
1 : : // SPDX-License-Identifier: GPL-2.0
2 : : /*
3 : : * Deadline Scheduling Class (SCHED_DEADLINE)
4 : : *
5 : : * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6 : : *
7 : : * Tasks that periodically execute their instances for less than their
8 : : * runtime won't miss any of their deadlines.
9 : : * Tasks that are not periodic or sporadic, or that try to execute more
10 : : * than their reserved bandwidth, will be slowed down (and may potentially
11 : : * miss some of their deadlines), and won't affect any other task.
12 : : *
13 : : * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
14 : : * Juri Lelli <juri.lelli@gmail.com>,
15 : : * Michael Trimarchi <michael@amarulasolutions.com>,
16 : : * Fabio Checconi <fchecconi@gmail.com>
17 : : */
18 : : #include "sched.h"
19 : : #include "pelt.h"
20 : :
21 : : struct dl_bandwidth def_dl_bandwidth;
22 : :
23 : : static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24 : : {
25 : 0 : return container_of(dl_se, struct task_struct, dl);
26 : : }
27 : :
28 : : static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29 : : {
30 : 0 : return container_of(dl_rq, struct rq, dl);
31 : : }
32 : :
33 : : static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34 : : {
35 : : struct task_struct *p = dl_task_of(dl_se);
36 : 0 : struct rq *rq = task_rq(p);
37 : :
38 : 0 : return &rq->dl;
39 : : }
40 : :
41 : : static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42 : : {
43 : 3 : return !RB_EMPTY_NODE(&dl_se->rb_node);
44 : : }
45 : :
46 : : #ifdef CONFIG_SMP
47 : : static inline struct dl_bw *dl_bw_of(int i)
48 : : {
49 : : RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
50 : : "sched RCU must be held");
51 : 0 : return &cpu_rq(i)->rd->dl_bw;
52 : : }
53 : :
54 : 0 : static inline int dl_bw_cpus(int i)
55 : : {
56 : 0 : struct root_domain *rd = cpu_rq(i)->rd;
57 : : int cpus = 0;
58 : :
59 : : RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
60 : : "sched RCU must be held");
61 : 0 : for_each_cpu_and(i, rd->span, cpu_active_mask)
62 : 0 : cpus++;
63 : :
64 : 0 : return cpus;
65 : : }
66 : : #else
67 : : static inline struct dl_bw *dl_bw_of(int i)
68 : : {
69 : : return &cpu_rq(i)->dl.dl_bw;
70 : : }
71 : :
72 : : static inline int dl_bw_cpus(int i)
73 : : {
74 : : return 1;
75 : : }
76 : : #endif
77 : :
78 : : static inline
79 : 0 : void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
80 : : {
81 : 0 : u64 old = dl_rq->running_bw;
82 : :
83 : : lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
84 : 0 : dl_rq->running_bw += dl_bw;
85 : 0 : SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
86 : 0 : SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
87 : : /* kick cpufreq (see the comment in kernel/sched/sched.h). */
88 : 0 : cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
89 : 0 : }
90 : :
91 : : static inline
92 : 0 : void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
93 : : {
94 : 0 : u64 old = dl_rq->running_bw;
95 : :
96 : : lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
97 : 0 : dl_rq->running_bw -= dl_bw;
98 : 0 : SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
99 : 0 : if (dl_rq->running_bw > old)
100 : 0 : dl_rq->running_bw = 0;
101 : : /* kick cpufreq (see the comment in kernel/sched/sched.h). */
102 : 0 : cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
103 : 0 : }
104 : :
105 : : static inline
106 : 0 : void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
107 : : {
108 : 0 : u64 old = dl_rq->this_bw;
109 : :
110 : : lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
111 : 0 : dl_rq->this_bw += dl_bw;
112 : 0 : SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
113 : 0 : }
114 : :
115 : : static inline
116 : 0 : void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
117 : : {
118 : 0 : u64 old = dl_rq->this_bw;
119 : :
120 : : lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
121 : 0 : dl_rq->this_bw -= dl_bw;
122 : 0 : SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
123 : 0 : if (dl_rq->this_bw > old)
124 : 0 : dl_rq->this_bw = 0;
125 : 0 : SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
126 : 0 : }
127 : :
128 : : static inline
129 : 0 : void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
130 : : {
131 : 0 : if (!dl_entity_is_special(dl_se))
132 : 0 : __add_rq_bw(dl_se->dl_bw, dl_rq);
133 : 0 : }
134 : :
135 : : static inline
136 : 0 : void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
137 : : {
138 : 0 : if (!dl_entity_is_special(dl_se))
139 : 0 : __sub_rq_bw(dl_se->dl_bw, dl_rq);
140 : 0 : }
141 : :
142 : : static inline
143 : 0 : void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
144 : : {
145 : 0 : if (!dl_entity_is_special(dl_se))
146 : 0 : __add_running_bw(dl_se->dl_bw, dl_rq);
147 : 0 : }
148 : :
149 : : static inline
150 : 0 : void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
151 : : {
152 : 0 : if (!dl_entity_is_special(dl_se))
153 : 0 : __sub_running_bw(dl_se->dl_bw, dl_rq);
154 : 0 : }
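/*
 * Editorial note (illustrative, not from the original source): the dl_bw
 * value added to / subtracted from running_bw and this_bw by the helpers
 * above is the task's bandwidth runtime/period encoded in fixed point,
 * i.e. scaled by 2^BW_SHIFT (BW_SHIFT is 20 in kernel/sched/sched.h at the
 * time of writing). A quick worked example, assuming a 10ms runtime every
 * 100ms:
 *
 *	dl_bw = (10ms << 20) / 100ms = 104857   (~0.1 * 2^20)
 *
 * so a runqueue whose running_bw reaches BW_UNIT (1 << 20) is fully
 * committed.
 */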
155 : :
156 : 0 : void dl_change_utilization(struct task_struct *p, u64 new_bw)
157 : : {
158 : : struct rq *rq;
159 : :
160 : 0 : BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
161 : :
162 : 0 : if (task_on_rq_queued(p))
163 : 0 : return;
164 : :
165 : 0 : rq = task_rq(p);
166 : 0 : if (p->dl.dl_non_contending) {
167 : 0 : sub_running_bw(&p->dl, &rq->dl);
168 : 0 : p->dl.dl_non_contending = 0;
169 : : /*
170 : : * If the timer handler is currently running and the
171 : : * timer cannot be cancelled, inactive_task_timer()
172 : : * will see that dl_non_contending is not set, and
173 : : * will not touch the rq's active utilization,
174 : : * so we are still safe.
175 : : */
176 : 0 : if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
177 : 0 : put_task_struct(p);
178 : : }
179 : 0 : __sub_rq_bw(p->dl.dl_bw, &rq->dl);
180 : 0 : __add_rq_bw(new_bw, &rq->dl);
181 : : }
182 : :
183 : : /*
184 : : * The utilization of a task cannot be immediately removed from
185 : : * the rq active utilization (running_bw) when the task blocks.
186 : : * Instead, we have to wait for the so called "0-lag time".
187 : : *
188 : : * If a task blocks before the "0-lag time", a timer (the inactive
189 : : * timer) is armed, and running_bw is decreased when the timer
190 : : * fires.
191 : : *
192 : : * If the task wakes up again before the inactive timer fires,
193 : : * the timer is cancelled, whereas if the task wakes up after the
194 : : * inactive timer fired (and running_bw has been decreased) the
195 : : * task's utilization has to be added to running_bw again.
196 : : * A flag in the deadline scheduling entity (dl_non_contending)
197 : : * is used to avoid race conditions between the inactive timer handler
198 : : * and task wakeups.
199 : : *
200 : : * The following diagram shows how running_bw is updated. A task is
201 : : * "ACTIVE" when its utilization contributes to running_bw; an
202 : : * "ACTIVE contending" task is in the TASK_RUNNING state, while an
203 : : * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
204 : : * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
205 : : * time already passed, which does not contribute to running_bw anymore.
206 : : * +------------------+
207 : : * wakeup | ACTIVE |
208 : : * +------------------>+ contending |
209 : : * | add_running_bw | |
210 : : * | +----+------+------+
211 : : * | | ^
212 : : * | dequeue | |
213 : : * +--------+-------+ | |
214 : : * | | t >= 0-lag | | wakeup
215 : : * | INACTIVE |<---------------+ |
216 : : * | | sub_running_bw | |
217 : : * +--------+-------+ | |
218 : : * ^ | |
219 : : * | t < 0-lag | |
220 : : * | | |
221 : : * | V |
222 : : * | +----+------+------+
223 : : * | sub_running_bw | ACTIVE |
224 : : * +-------------------+ |
225 : : * inactive timer | non contending |
226 : : * fired +------------------+
227 : : *
228 : : * The task_non_contending() function is invoked when a task
229 : : * blocks, and checks whether the 0-lag time has already passed or
230 : : * not (in the first case, it directly updates running_bw;
231 : : * in the second case, it arms the inactive timer).
232 : : *
233 : : * The task_contending() function is invoked when a task wakes
234 : : * up, and checks if the task is still in the "ACTIVE non contending"
235 : : * state or not (in the second case, it updates running_bw).
236 : : */
237 : 0 : static void task_non_contending(struct task_struct *p)
238 : : {
239 : 0 : struct sched_dl_entity *dl_se = &p->dl;
240 : 0 : struct hrtimer *timer = &dl_se->inactive_timer;
241 : : struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
242 : : struct rq *rq = rq_of_dl_rq(dl_rq);
243 : : s64 zerolag_time;
244 : :
245 : : /*
246 : : * If this is a non-deadline task that has been boosted,
247 : : * do nothing
248 : : */
249 : 0 : if (dl_se->dl_runtime == 0)
250 : : return;
251 : :
252 : 0 : if (dl_entity_is_special(dl_se))
253 : : return;
254 : :
255 : 0 : WARN_ON(dl_se->dl_non_contending);
256 : :
257 : 0 : zerolag_time = dl_se->deadline -
258 : 0 : div64_long((dl_se->runtime * dl_se->dl_period),
259 : : dl_se->dl_runtime);
260 : :
261 : : /*
262 : : * Using relative times instead of the absolute "0-lag time"
263 : : * simplifies the code
264 : : */
265 : 0 : zerolag_time -= rq_clock(rq);
266 : :
267 : : /*
268 : : * If the "0-lag time" already passed, decrease the active
269 : : * utilization now, instead of starting a timer
270 : : */
271 : 0 : if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
272 : 0 : if (dl_task(p))
273 : 0 : sub_running_bw(dl_se, dl_rq);
274 : 0 : if (!dl_task(p) || p->state == TASK_DEAD) {
275 : 0 : struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
276 : :
277 : 0 : if (p->state == TASK_DEAD)
278 : 0 : sub_rq_bw(&p->dl, &rq->dl);
279 : 0 : raw_spin_lock(&dl_b->lock);
280 : 0 : __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
281 : : __dl_clear_params(p);
282 : : raw_spin_unlock(&dl_b->lock);
283 : : }
284 : :
285 : : return;
286 : : }
287 : :
288 : 0 : dl_se->dl_non_contending = 1;
289 : : get_task_struct(p);
290 : : hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
291 : : }
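/*
 * Editorial note (illustrative numbers, not from the original source): the
 * zerolag_time computed above is the instant at which the task's lag reaches
 * zero, i.e. the point after which its utilization can be dropped from
 * running_bw without endangering other reservations:
 *
 *	zero-lag = deadline - runtime * dl_period / dl_runtime
 *
 * For instance, with dl_runtime = 10ms, dl_period = 100ms, 4ms of runtime
 * left and an absolute deadline at t = 150ms:
 *
 *	zero-lag = 150ms - 4ms * 100 / 10 = 150ms - 40ms = 110ms
 *
 * so the inactive timer is armed 110ms - rq_clock(rq) in the future, or the
 * bandwidth is released immediately if that value is already negative.
 */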
292 : :
293 : 0 : static void task_contending(struct sched_dl_entity *dl_se, int flags)
294 : : {
295 : : struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
296 : :
297 : : /*
298 : : * If this is a non-deadline task that has been boosted,
299 : : * do nothing
300 : : */
301 : 0 : if (dl_se->dl_runtime == 0)
302 : 0 : return;
303 : :
304 : 0 : if (flags & ENQUEUE_MIGRATED)
305 : 0 : add_rq_bw(dl_se, dl_rq);
306 : :
307 : 0 : if (dl_se->dl_non_contending) {
308 : 0 : dl_se->dl_non_contending = 0;
309 : : /*
310 : : * If the timer handler is currently running and the
311 : : * timer cannot be cancelled, inactive_task_timer()
312 : : * will see that dl_non_contending is not set, and
313 : : * will not touch the rq's active utilization,
314 : : * so we are still safe.
315 : : */
316 : 0 : if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
317 : 0 : put_task_struct(dl_task_of(dl_se));
318 : : } else {
319 : : /*
320 : : * Since "dl_non_contending" is not set, the
321 : : * task's utilization has already been removed from
322 : : * active utilization (either when the task blocked, or
323 : : * when the "inactive timer" fired).
324 : : * So, add it back.
325 : : */
326 : 0 : add_running_bw(dl_se, dl_rq);
327 : : }
328 : : }
329 : :
330 : : static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
331 : : {
332 : : struct sched_dl_entity *dl_se = &p->dl;
333 : :
334 : 0 : return dl_rq->root.rb_leftmost == &dl_se->rb_node;
335 : : }
336 : :
337 : 3 : void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
338 : : {
339 : 3 : raw_spin_lock_init(&dl_b->dl_runtime_lock);
340 : 3 : dl_b->dl_period = period;
341 : 3 : dl_b->dl_runtime = runtime;
342 : 3 : }
343 : :
344 : 3 : void init_dl_bw(struct dl_bw *dl_b)
345 : : {
346 : 3 : raw_spin_lock_init(&dl_b->lock);
347 : 3 : raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
348 : 3 : if (global_rt_runtime() == RUNTIME_INF)
349 : 0 : dl_b->bw = -1;
350 : : else
351 : 3 : dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
352 : : raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
353 : 3 : dl_b->total_bw = 0;
354 : 3 : }
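/*
 * Editorial note (illustrative, assuming the default rt limits): with
 * sched_rt_period_us = 1000000 and sched_rt_runtime_us = 950000, the
 * to_ratio() call above yields
 *
 *	dl_b->bw = (950000us << 20) / 1000000us = 996147
 *
 * i.e. 95% of BW_UNIT per CPU; the admission test rejects any request that
 * would push the sum of admitted task bandwidths above dl_b->bw times the
 * number of CPUs in the root domain (or above dl_b->bw on !SMP).
 */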
355 : :
356 : 3 : void init_dl_rq(struct dl_rq *dl_rq)
357 : : {
358 : 3 : dl_rq->root = RB_ROOT_CACHED;
359 : :
360 : : #ifdef CONFIG_SMP
361 : : /* zero means no -deadline tasks */
362 : 3 : dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
363 : :
364 : 3 : dl_rq->dl_nr_migratory = 0;
365 : 3 : dl_rq->overloaded = 0;
366 : 3 : dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
367 : : #else
368 : : init_dl_bw(&dl_rq->dl_bw);
369 : : #endif
370 : :
371 : 3 : dl_rq->running_bw = 0;
372 : 3 : dl_rq->this_bw = 0;
373 : 3 : init_dl_rq_bw_ratio(dl_rq);
374 : 3 : }
375 : :
376 : : #ifdef CONFIG_SMP
377 : :
378 : : static inline int dl_overloaded(struct rq *rq)
379 : : {
380 : 0 : return atomic_read(&rq->rd->dlo_count);
381 : : }
382 : :
383 : 0 : static inline void dl_set_overload(struct rq *rq)
384 : : {
385 : 0 : if (!rq->online)
386 : 0 : return;
387 : :
388 : 0 : cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
389 : : /*
390 : : * Must be visible before the overload count is
391 : : * set (as in sched_rt.c).
392 : : *
393 : : * Matched by the barrier in pull_dl_task().
394 : : */
395 : 0 : smp_wmb();
396 : 0 : atomic_inc(&rq->rd->dlo_count);
397 : : }
398 : :
399 : 0 : static inline void dl_clear_overload(struct rq *rq)
400 : : {
401 : 0 : if (!rq->online)
402 : 0 : return;
403 : :
404 : 0 : atomic_dec(&rq->rd->dlo_count);
405 : 0 : cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
406 : : }
407 : :
408 : 0 : static void update_dl_migration(struct dl_rq *dl_rq)
409 : : {
410 : 0 : if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
411 : 0 : if (!dl_rq->overloaded) {
412 : 0 : dl_set_overload(rq_of_dl_rq(dl_rq));
413 : 0 : dl_rq->overloaded = 1;
414 : : }
415 : 0 : } else if (dl_rq->overloaded) {
416 : 0 : dl_clear_overload(rq_of_dl_rq(dl_rq));
417 : 0 : dl_rq->overloaded = 0;
418 : : }
419 : 0 : }
420 : :
421 : : static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
422 : : {
423 : : struct task_struct *p = dl_task_of(dl_se);
424 : :
425 : 0 : if (p->nr_cpus_allowed > 1)
426 : 0 : dl_rq->dl_nr_migratory++;
427 : :
428 : 0 : update_dl_migration(dl_rq);
429 : : }
430 : :
431 : : static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
432 : : {
433 : : struct task_struct *p = dl_task_of(dl_se);
434 : :
435 : 0 : if (p->nr_cpus_allowed > 1)
436 : 0 : dl_rq->dl_nr_migratory--;
437 : :
438 : 0 : update_dl_migration(dl_rq);
439 : : }
440 : :
441 : : /*
442 : : * The list of pushable -deadline tasks is not a plist, like in
443 : : * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
444 : : */
445 : 0 : static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
446 : : {
447 : : struct dl_rq *dl_rq = &rq->dl;
448 : 0 : struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
449 : : struct rb_node *parent = NULL;
450 : : struct task_struct *entry;
451 : : bool leftmost = true;
452 : :
453 : 0 : BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
454 : :
455 : 0 : while (*link) {
456 : : parent = *link;
457 : : entry = rb_entry(parent, struct task_struct,
458 : : pushable_dl_tasks);
459 : 0 : if (dl_entity_preempt(&p->dl, &entry->dl))
460 : 0 : link = &parent->rb_left;
461 : : else {
462 : 0 : link = &parent->rb_right;
463 : : leftmost = false;
464 : : }
465 : : }
466 : :
467 : 0 : if (leftmost)
468 : 0 : dl_rq->earliest_dl.next = p->dl.deadline;
469 : :
470 : : rb_link_node(&p->pushable_dl_tasks, parent, link);
471 : : rb_insert_color_cached(&p->pushable_dl_tasks,
472 : : &dl_rq->pushable_dl_tasks_root, leftmost);
473 : 0 : }
474 : :
475 : 0 : static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
476 : : {
477 : : struct dl_rq *dl_rq = &rq->dl;
478 : :
479 : 0 : if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
480 : 0 : return;
481 : :
482 : 0 : if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
483 : : struct rb_node *next_node;
484 : :
485 : 0 : next_node = rb_next(&p->pushable_dl_tasks);
486 : 0 : if (next_node) {
487 : 0 : dl_rq->earliest_dl.next = rb_entry(next_node,
488 : 0 : struct task_struct, pushable_dl_tasks)->dl.deadline;
489 : : }
490 : : }
491 : :
492 : 0 : rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
493 : 0 : RB_CLEAR_NODE(&p->pushable_dl_tasks);
494 : : }
495 : :
496 : : static inline int has_pushable_dl_tasks(struct rq *rq)
497 : : {
498 : 0 : return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
499 : : }
500 : :
501 : : static int push_dl_task(struct rq *rq);
502 : :
503 : : static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
504 : : {
505 : : return dl_task(prev);
506 : : }
507 : :
508 : : static DEFINE_PER_CPU(struct callback_head, dl_push_head);
509 : : static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
510 : :
511 : : static void push_dl_tasks(struct rq *);
512 : : static void pull_dl_task(struct rq *);
513 : :
514 : : static inline void deadline_queue_push_tasks(struct rq *rq)
515 : : {
516 : 0 : if (!has_pushable_dl_tasks(rq))
517 : : return;
518 : :
519 : 0 : queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
520 : : }
521 : :
522 : : static inline void deadline_queue_pull_task(struct rq *rq)
523 : : {
524 : 0 : queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
525 : : }
526 : :
527 : : static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
528 : :
529 : 0 : static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
530 : : {
531 : : struct rq *later_rq = NULL;
532 : : struct dl_bw *dl_b;
533 : :
534 : 0 : later_rq = find_lock_later_rq(p, rq);
535 : 0 : if (!later_rq) {
536 : : int cpu;
537 : :
538 : : /*
539 : : * If we cannot preempt any rq, fall back to pick any
540 : : * online CPU:
541 : : */
542 : 0 : cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
543 : 0 : if (cpu >= nr_cpu_ids) {
544 : : /*
545 : : * Failed to find any suitable CPU.
546 : : * The task will never come back!
547 : : */
548 : 0 : BUG_ON(dl_bandwidth_enabled());
549 : :
550 : : /*
551 : : * If admission control is disabled we
552 : : * try a little harder to let the task
553 : : * run.
554 : : */
555 : : cpu = cpumask_any(cpu_active_mask);
556 : : }
557 : 0 : later_rq = cpu_rq(cpu);
558 : 0 : double_lock_balance(rq, later_rq);
559 : : }
560 : :
561 : 0 : if (p->dl.dl_non_contending || p->dl.dl_throttled) {
562 : : /*
563 : : * Inactive timer is armed (or callback is running, but
564 : : * waiting for us to release rq locks). In any case, when it
565 : : * will fire (or continue), it will see running_bw of this
566 : : * task migrated to later_rq (and correctly handle it).
567 : : */
568 : 0 : sub_running_bw(&p->dl, &rq->dl);
569 : 0 : sub_rq_bw(&p->dl, &rq->dl);
570 : :
571 : 0 : add_rq_bw(&p->dl, &later_rq->dl);
572 : 0 : add_running_bw(&p->dl, &later_rq->dl);
573 : : } else {
574 : 0 : sub_rq_bw(&p->dl, &rq->dl);
575 : 0 : add_rq_bw(&p->dl, &later_rq->dl);
576 : : }
577 : :
578 : : /*
579 : : * And we finally need to fixup root_domain(s) bandwidth accounting,
580 : : * since p is still hanging out in the old (now moved to default) root
581 : : * domain.
582 : : */
583 : 0 : dl_b = &rq->rd->dl_bw;
584 : 0 : raw_spin_lock(&dl_b->lock);
585 : 0 : __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
586 : : raw_spin_unlock(&dl_b->lock);
587 : :
588 : 0 : dl_b = &later_rq->rd->dl_bw;
589 : 0 : raw_spin_lock(&dl_b->lock);
590 : 0 : __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
591 : : raw_spin_unlock(&dl_b->lock);
592 : :
593 : 0 : set_task_cpu(p, later_rq->cpu);
594 : : double_unlock_balance(later_rq, rq);
595 : :
596 : 0 : return later_rq;
597 : : }
598 : :
599 : : #else
600 : :
601 : : static inline
602 : : void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
603 : : {
604 : : }
605 : :
606 : : static inline
607 : : void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
608 : : {
609 : : }
610 : :
611 : : static inline
612 : : void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
613 : : {
614 : : }
615 : :
616 : : static inline
617 : : void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
618 : : {
619 : : }
620 : :
621 : : static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
622 : : {
623 : : return false;
624 : : }
625 : :
626 : : static inline void pull_dl_task(struct rq *rq)
627 : : {
628 : : }
629 : :
630 : : static inline void deadline_queue_push_tasks(struct rq *rq)
631 : : {
632 : : }
633 : :
634 : : static inline void deadline_queue_pull_task(struct rq *rq)
635 : : {
636 : : }
637 : : #endif /* CONFIG_SMP */
638 : :
639 : : static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
640 : : static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
641 : : static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
642 : :
643 : : /*
644 : : * We are being explicitly informed that a new instance is starting,
645 : : * and this means that:
646 : : * - the absolute deadline of the entity has to be placed at
647 : : * current time + relative deadline;
648 : : * - the runtime of the entity has to be set to the maximum value.
649 : : *
650 : : * The capability of specifying such an event is useful whenever a -deadline
651 : : * entity wants to (try to!) synchronize its behaviour with the scheduler's,
652 : : * and to (try to!) reconcile itself with its own scheduling
653 : : * parameters.
654 : : */
655 : 0 : static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
656 : : {
657 : : struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
658 : : struct rq *rq = rq_of_dl_rq(dl_rq);
659 : :
660 : 0 : WARN_ON(dl_se->dl_boosted);
661 : 0 : WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
662 : :
663 : : /*
664 : : * We are racing with the deadline timer. So, do nothing because
665 : : * the deadline timer handler will take care of properly recharging
666 : : * the runtime and postponing the deadline
667 : : */
668 : 0 : if (dl_se->dl_throttled)
669 : 0 : return;
670 : :
671 : : /*
672 : : * We use the regular wall clock time to set deadlines in the
673 : : * future; in fact, we must consider execution overheads (time
674 : : * spent on hardirq context, etc.).
675 : : */
676 : 0 : dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
677 : 0 : dl_se->runtime = dl_se->dl_runtime;
678 : : }
679 : :
680 : : /*
681 : : * Pure Earliest Deadline First (EDF) scheduling does not deal with the
682 : : * possibility of an entity lasting more than what it declared, and thus
683 : : * exhausting its runtime.
684 : : *
685 : : * Here we are interested in making runtime overrun possible, but we do
686 : : * not want an entity which is misbehaving to affect the scheduling of all
687 : : * other entities.
688 : : * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
689 : : * is used, in order to confine each entity within its own bandwidth.
690 : : *
691 : : * This function deals exactly with that, and ensures that when the runtime
692 : : * of an entity is replenished, its deadline is also postponed. That ensures
693 : : * the overrunning entity can't interfere with other entities in the system and
694 : : * can't make them miss their deadlines. Reasons why this kind of overrun
695 : : * could happen are, typically, an entity voluntarily trying to exceed its
696 : : * runtime, or having underestimated it during sched_setattr().
697 : : */
698 : 0 : static void replenish_dl_entity(struct sched_dl_entity *dl_se,
699 : : struct sched_dl_entity *pi_se)
700 : : {
701 : : struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
702 : : struct rq *rq = rq_of_dl_rq(dl_rq);
703 : :
704 : 0 : BUG_ON(pi_se->dl_runtime <= 0);
705 : :
706 : : /*
707 : : * This could be the case for a !-dl task that is boosted.
708 : : * Just go with full inherited parameters.
709 : : */
710 : 0 : if (dl_se->dl_deadline == 0) {
711 : 0 : dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
712 : 0 : dl_se->runtime = pi_se->dl_runtime;
713 : : }
714 : :
715 : 0 : if (dl_se->dl_yielded && dl_se->runtime > 0)
716 : 0 : dl_se->runtime = 0;
717 : :
718 : : /*
719 : : * We keep moving the deadline away until we get some
720 : : * available runtime for the entity. This ensures correct
721 : : * handling of situations where the runtime overrun is
722 : : * arbitrarily large.
723 : : */
724 : 0 : while (dl_se->runtime <= 0) {
725 : 0 : dl_se->deadline += pi_se->dl_period;
726 : 0 : dl_se->runtime += pi_se->dl_runtime;
727 : : }
728 : :
729 : : /*
730 : : * At this point, the deadline really should be "in
731 : : * the future" with respect to rq->clock. If it's
732 : : * not, we are, for some reason, lagging too much!
733 : : * Anyway, after having warned userspace about that,
734 : : * we still try to keep things running by
735 : : * resetting the deadline and the budget of the
736 : : * entity.
737 : : */
738 : 0 : if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
739 : 0 : printk_deferred_once("sched: DL replenish lagged too much\n");
740 : 0 : dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
741 : 0 : dl_se->runtime = pi_se->dl_runtime;
742 : : }
743 : :
744 : 0 : if (dl_se->dl_yielded)
745 : 0 : dl_se->dl_yielded = 0;
746 : 0 : if (dl_se->dl_throttled)
747 : 0 : dl_se->dl_throttled = 0;
748 : 0 : }
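/*
 * Editorial note: the comment above mentions sched_setattr(). The snippet
 * below is a minimal, illustrative user-space sketch (not part of this file,
 * deliberately compiled out) of how a reservation served by this CBS is
 * typically requested; the 10ms/30ms/30ms values and the helper name
 * become_deadline_task() are assumptions made for the example only.
 */
#if 0	/* illustrative user-space example, not kernel code */
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#define SCHED_DEADLINE	6	/* matches the uapi value */

/* Layout as documented in Documentation/scheduler/sched-deadline.rst. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* all three in nanoseconds */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

static int become_deadline_task(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * 1000 * 1000,	/* 10ms budget ...   */
		.sched_deadline	= 30 * 1000 * 1000,	/* ... every 30ms    */
		.sched_period	= 30 * 1000 * 1000,
	};

	/* There is no glibc wrapper; use the raw syscall on this thread. */
	return syscall(SYS_sched_setattr, 0, &attr, 0);
}
#endif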
749 : :
750 : : /*
751 : : * Here we check if --at time t-- an entity (which is probably being
752 : : * [re]activated or, in general, enqueued) can use its remaining runtime
753 : : * and its current deadline _without_ exceeding the bandwidth it is
754 : : * assigned (function returns true if it can't). We are in fact applying
755 : : * one of the CBS rules: when a task wakes up, if the residual runtime
756 : : * over residual deadline fits within the allocated bandwidth, then we
757 : : * can keep the current (absolute) deadline and residual budget without
758 : : * disrupting the schedulability of the system. Otherwise, we should
759 : : * refill the runtime and set the deadline a period in the future,
760 : : * because keeping the current (absolute) deadline of the task would
761 : : * result in breaking guarantees promised to other tasks (refer to
762 : : * Documentation/scheduler/sched-deadline.rst for more information).
763 : : *
764 : : * This function returns true if:
765 : : *
766 : : * runtime / (deadline - t) > dl_runtime / dl_deadline ,
767 : : *
768 : : * IOW we can't recycle current parameters.
769 : : *
770 : : * Notice that the bandwidth check is done against the deadline. For
771 : : * tasks with deadline equal to period this is the same as using
772 : : * dl_period instead of dl_deadline in the equation above.
773 : : */
774 : : static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
775 : : struct sched_dl_entity *pi_se, u64 t)
776 : : {
777 : : u64 left, right;
778 : :
779 : : /*
780 : : * left and right are the two sides of the equation above,
781 : : * after a bit of shuffling to use multiplications instead
782 : : * of divisions.
783 : : *
784 : : * Note that none of the time values involved in the two
785 : : * multiplications are absolute: dl_deadline and dl_runtime
786 : : * are the relative deadline and the maximum runtime of each
787 : : * instance, runtime is the runtime left for the last instance
788 : : * and (deadline - t), since t is rq->clock, is the time left
789 : : * to the (absolute) deadline. Even if overflowing the u64 type
790 : : * is very unlikely to occur in both cases, here we scale down
791 : : * as we want to avoid that risk at all. Scaling down by 10
792 : : * means that we reduce granularity to 1us. We are fine with it,
793 : : * since this is only a true/false check and, anyway, thinking
794 : : * of anything below microsecond resolution is actually fiction
795 : : * (but still we want to give the user that illusion >;).
796 : : */
797 : 0 : left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
798 : 0 : right = ((dl_se->deadline - t) >> DL_SCALE) *
799 : 0 : (pi_se->dl_runtime >> DL_SCALE);
800 : :
801 : : return dl_time_before(right, left);
802 : : }
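/*
 * Editorial note (illustrative numbers, not from the original source): a
 * worked instance of the check above, ignoring the DL_SCALE shifts for
 * readability. Take dl_runtime = 10ms, dl_deadline = 100ms (reserved
 * bandwidth 0.1) and a task waking at t with 4ms of runtime left and 30ms
 * to its old absolute deadline:
 *
 *	left  = dl_deadline * runtime       = 100 * 4  = 400
 *	right = (deadline - t) * dl_runtime =  30 * 10 = 300
 *
 * right < left, i.e. 4/30 > 10/100, so reusing the old deadline would exceed
 * the reserved bandwidth and the parameters are refreshed instead.
 */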
803 : :
804 : : /*
805 : : * Revised wakeup rule [1]: For self-suspending tasks, rather than
806 : : * re-initializing the task's runtime and deadline, the revised wakeup
807 : : * rule adjusts the task's runtime to avoid the task overrunning its
808 : : * density.
809 : : *
810 : : * Reasoning: a task may overrun the density if:
811 : : * runtime / (deadline - t) > dl_runtime / dl_deadline
812 : : *
813 : : * Therefore, runtime can be adjusted to:
814 : : * runtime = (dl_runtime / dl_deadline) * (deadline - t)
815 : : *
816 : : * This way, the adjusted runtime corresponds to the maximum density
817 : : * the task can use without breaking any rule.
818 : : *
819 : : * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
820 : : * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
821 : : */
822 : : static void
823 : 0 : update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
824 : : {
825 : 0 : u64 laxity = dl_se->deadline - rq_clock(rq);
826 : :
827 : : /*
828 : : * If the task has deadline < period, and the deadline is in the past,
829 : : * it should already be throttled before this check.
830 : : *
831 : : * See update_dl_entity() comments for further details.
832 : : */
833 : 0 : WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
834 : :
835 : 0 : dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
836 : 0 : }
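/*
 * Editorial note (illustrative numbers, not from the original source):
 * dl_density is dl_runtime / dl_deadline scaled by 2^BW_SHIFT, so the
 * assignment above computes
 *
 *	runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * Continuing the example used for dl_entity_overflow() (dl_runtime = 10ms,
 * dl_deadline = 100ms, 30ms of laxity left), the revised runtime becomes
 * 0.1 * 30ms = 3ms instead of the 4ms that were left, so the task's density
 * never exceeds dl_runtime / dl_deadline.
 */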
837 : :
838 : : /*
839 : : * Regarding the deadline, a task with implicit deadline has a relative
840 : : * deadline == relative period. A task with constrained deadline has a
841 : : * relative deadline <= relative period.
842 : : *
843 : : * We support constrained deadline tasks. However, there are some restrictions
844 : : * applied only for tasks which do not have an implicit deadline. See
845 : : * update_dl_entity() to know more about such restrictions.
846 : : *
847 : : * dl_is_implicit() returns true if the task has an implicit deadline.
848 : : */
849 : : static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
850 : : {
851 : 0 : return dl_se->dl_deadline == dl_se->dl_period;
852 : : }
853 : :
854 : : /*
855 : : * When a deadline entity is placed in the runqueue, its runtime and deadline
856 : : * might need to be updated. This is done by a CBS wake up rule. There are two
857 : : * different rules: 1) the original CBS; and 2) the Revisited CBS.
858 : : *
859 : : * When the task is starting a new period, the Original CBS is used. In this
860 : : * case, the runtime is replenished and a new absolute deadline is set.
861 : : *
862 : : * When a task is queued before the beginning of the next period, using the
863 : : * remaining runtime and deadline could make the entity overflow; see
864 : : * dl_entity_overflow() to find out more about runtime overflow. When such a
865 : : * case is detected, the runtime and deadline need to be updated.
866 : : *
867 : : * If the task has an implicit deadline, i.e., deadline == period, the Original
868 : : * CBS is applied: the runtime is replenished and a new absolute deadline is
869 : : * set, as in the previous cases.
870 : : *
871 : : * However, the Original CBS does not work properly for tasks with
872 : : * deadline < period, which are said to have a constrained deadline. By
873 : : * applying the Original CBS, a constrained deadline task would be able to run
874 : : * runtime/deadline in a period. With deadline < period, the task would
875 : : * overrun the runtime/period allowed bandwidth, breaking the admission test.
876 : : *
877 : : * In order to prevent this misbehaviour, the Revisited CBS is used for
878 : : * constrained deadline tasks when a runtime overflow is detected. In the
879 : : * Revisited CBS, rather than replenishing & setting a new absolute deadline,
880 : : * the remaining runtime of the task is reduced to avoid runtime overflow.
881 : : * Please refer to the comments in the update_dl_revised_wakeup() function
882 : : * to find out more about the Revisited CBS rule.
883 : : */
884 : 0 : static void update_dl_entity(struct sched_dl_entity *dl_se,
885 : : struct sched_dl_entity *pi_se)
886 : : {
887 : : struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
888 : : struct rq *rq = rq_of_dl_rq(dl_rq);
889 : :
890 : 0 : if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
891 : : dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
892 : :
893 : 0 : if (unlikely(!dl_is_implicit(dl_se) &&
894 : : !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
895 : : !dl_se->dl_boosted)){
896 : 0 : update_dl_revised_wakeup(dl_se, rq);
897 : 0 : return;
898 : : }
899 : :
900 : 0 : dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
901 : 0 : dl_se->runtime = pi_se->dl_runtime;
902 : : }
903 : : }
904 : :
905 : : static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
906 : : {
907 : 0 : return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
908 : : }
909 : :
910 : : /*
911 : : * If the entity depleted all its runtime, and if we want it to sleep
912 : : * while waiting for some new execution time to become available, we
913 : : * set the bandwidth replenishment timer to the replenishment instant
914 : : * and try to activate it.
915 : : *
916 : : * Notice that it is important for the caller to know if the timer
917 : : * actually started or not (i.e., the replenishment instant is in
918 : : * the future or in the past).
919 : : */
920 : 0 : static int start_dl_timer(struct task_struct *p)
921 : : {
922 : : struct sched_dl_entity *dl_se = &p->dl;
923 : 0 : struct hrtimer *timer = &dl_se->dl_timer;
924 : 0 : struct rq *rq = task_rq(p);
925 : : ktime_t now, act;
926 : : s64 delta;
927 : :
928 : : lockdep_assert_held(&rq->lock);
929 : :
930 : : /*
931 : : * We want the timer to fire at the deadline, but consider that
932 : : * the deadline is expressed in rq->clock time and not in the
933 : : * hrtimer's time base.
934 : : */
935 : : act = ns_to_ktime(dl_next_period(dl_se));
936 : : now = hrtimer_cb_get_time(timer);
937 : 0 : delta = ktime_to_ns(now) - rq_clock(rq);
938 : 0 : act = ktime_add_ns(act, delta);
939 : :
940 : : /*
941 : : * If the expiry time already passed, e.g., because the value
942 : : * chosen as the deadline is too small, don't even try to
943 : : * start the timer in the past!
944 : : */
945 : 0 : if (ktime_us_delta(act, now) < 0)
946 : : return 0;
947 : :
948 : : /*
949 : : * !enqueued will guarantee another callback; even if one is already in
950 : : * progress. This ensures a balanced {get,put}_task_struct().
951 : : *
952 : : * The race against __run_timer() clearing the enqueued state is
953 : : * harmless because we're holding task_rq()->lock, therefore the timer
954 : : * expiring after we've done the check will wait on its task_rq_lock()
955 : : * and observe our state.
956 : : */
957 : 0 : if (!hrtimer_is_queued(timer)) {
958 : : get_task_struct(p);
959 : : hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
960 : : }
961 : :
962 : : return 1;
963 : : }
964 : :
965 : : /*
966 : : * This is the bandwidth enforcement timer callback. If here, we know
967 : : * a task is not on its dl_rq, since the fact that the timer was running
968 : : * means the task is throttled and needs a runtime replenishment.
969 : : *
970 : : * However, what we actually do depends on the fact the task is active,
971 : : * (it is on its rq) or has been removed from there by a call to
972 : : * dequeue_task_dl(). In the former case we must issue the runtime
973 : : * replenishment and add the task back to the dl_rq; in the latter, we just
974 : : * do nothing but clearing dl_throttled, so that runtime and deadline
975 : : * updating (and the queueing back to dl_rq) will be done by the
976 : : * next call to enqueue_task_dl().
977 : : */
978 : 0 : static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
979 : : {
980 : 0 : struct sched_dl_entity *dl_se = container_of(timer,
981 : : struct sched_dl_entity,
982 : : dl_timer);
983 : : struct task_struct *p = dl_task_of(dl_se);
984 : : struct rq_flags rf;
985 : : struct rq *rq;
986 : :
987 : 0 : rq = task_rq_lock(p, &rf);
988 : :
989 : : /*
990 : : * The task might have changed its scheduling policy to something
991 : : * different than SCHED_DEADLINE (through switched_from_dl()).
992 : : */
993 : 0 : if (!dl_task(p))
994 : : goto unlock;
995 : :
996 : : /*
997 : : * The task might have been boosted by someone else and might be in the
998 : : * boosting/deboosting path; it's not throttled.
999 : : */
1000 : 0 : if (dl_se->dl_boosted)
1001 : : goto unlock;
1002 : :
1003 : : /*
1004 : : * Spurious timer due to start_dl_timer() race; or we already received
1005 : : * a replenishment from rt_mutex_setprio().
1006 : : */
1007 : 0 : if (!dl_se->dl_throttled)
1008 : : goto unlock;
1009 : :
1010 : : sched_clock_tick();
1011 : 0 : update_rq_clock(rq);
1012 : :
1013 : : /*
1014 : : * If the throttle happened during sched-out; like:
1015 : : *
1016 : : * schedule()
1017 : : * deactivate_task()
1018 : : * dequeue_task_dl()
1019 : : * update_curr_dl()
1020 : : * start_dl_timer()
1021 : : * __dequeue_task_dl()
1022 : : * prev->on_rq = 0;
1023 : : *
1024 : : * We can be both throttled and !queued. Replenish the counter
1025 : : * but do not enqueue -- wait for our wakeup to do that.
1026 : : */
1027 : 0 : if (!task_on_rq_queued(p)) {
1028 : 0 : replenish_dl_entity(dl_se, dl_se);
1029 : 0 : goto unlock;
1030 : : }
1031 : :
1032 : : #ifdef CONFIG_SMP
1033 : 0 : if (unlikely(!rq->online)) {
1034 : : /*
1035 : : * If the runqueue is no longer available, migrate the
1036 : : * task elsewhere. This necessarily changes rq.
1037 : : */
1038 : : lockdep_unpin_lock(&rq->lock, rf.cookie);
1039 : 0 : rq = dl_task_offline_migration(rq, p);
1040 : : rf.cookie = lockdep_pin_lock(&rq->lock);
1041 : 0 : update_rq_clock(rq);
1042 : :
1043 : : /*
1044 : : * Now that the task has been migrated to the new RQ and we
1045 : : * have that locked, proceed as normal and enqueue the task
1046 : : * there.
1047 : : */
1048 : : }
1049 : : #endif
1050 : :
1051 : 0 : enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1052 : 0 : if (dl_task(rq->curr))
1053 : 0 : check_preempt_curr_dl(rq, p, 0);
1054 : : else
1055 : 0 : resched_curr(rq);
1056 : :
1057 : : #ifdef CONFIG_SMP
1058 : : /*
1059 : : * Queueing this task back might have overloaded rq, check if we need
1060 : : * to kick someone away.
1061 : : */
1062 : 0 : if (has_pushable_dl_tasks(rq)) {
1063 : : /*
1064 : : * Nothing relies on rq->lock after this, so it's safe to drop
1065 : : * rq->lock.
1066 : : */
1067 : : rq_unpin_lock(rq, &rf);
1068 : 0 : push_dl_task(rq);
1069 : : rq_repin_lock(rq, &rf);
1070 : : }
1071 : : #endif
1072 : :
1073 : : unlock:
1074 : 0 : task_rq_unlock(rq, p, &rf);
1075 : :
1076 : : /*
1077 : : * This can free the task_struct, including this hrtimer, do not touch
1078 : : * anything related to that after this.
1079 : : */
1080 : 0 : put_task_struct(p);
1081 : :
1082 : 0 : return HRTIMER_NORESTART;
1083 : : }
1084 : :
1085 : 3 : void init_dl_task_timer(struct sched_dl_entity *dl_se)
1086 : : {
1087 : 3 : struct hrtimer *timer = &dl_se->dl_timer;
1088 : :
1089 : 3 : hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1090 : 3 : timer->function = dl_task_timer;
1091 : 3 : }
1092 : :
1093 : : /*
1094 : : * During the activation, CBS checks if it can reuse the current task's
1095 : : * runtime and period. If the deadline of the task is in the past, CBS
1096 : : * cannot use the runtime, and so it replenishes the task. This rule
1097 : : * works fine for implicit deadline tasks (deadline == period), and the
1098 : : * CBS was designed for implicit deadline tasks. However, a task with
1099 : : * constrained deadline (deadine < period) might be awakened after the
1100 : : * deadline, but before the next period. In this case, replenishing the
1101 : : * task would allow it to run for runtime / deadline. As in this case
1102 : : * deadline < period, CBS enables a task to run for more than the
1103 : : * runtime / period. In a very loaded system, this can cause a domino
1104 : : * effect, making other tasks miss their deadlines.
1105 : : *
1106 : : * To avoid this problem, in the activation of a constrained deadline
1107 : : * task after the deadline but before the next period, throttle the
1108 : : * task and set the replenishing timer to the beginning of the next period,
1109 : : * unless it is boosted.
1110 : : */
1111 : 0 : static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1112 : : {
1113 : : struct task_struct *p = dl_task_of(dl_se);
1114 : : struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1115 : :
1116 : 0 : if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1117 : : dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1118 : 0 : if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1119 : 0 : return;
1120 : 0 : dl_se->dl_throttled = 1;
1121 : 0 : if (dl_se->runtime > 0)
1122 : 0 : dl_se->runtime = 0;
1123 : : }
1124 : : }
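/*
 * Editorial note (illustrative numbers, not from the original source):
 * consider a constrained task with dl_runtime = 10ms, dl_deadline = 50ms and
 * dl_period = 100ms whose current absolute deadline is at t = 150ms, so the
 * next period starts at t = 200ms. If it wakes up at t = 170ms, i.e. after
 * the deadline but before the next period, the check above throttles it and
 * arms the replenishment timer for t = 200ms; replenishing right away would
 * instead let it consume up to runtime/deadline = 10ms/50ms = 0.2 of the
 * CPU, twice its 10ms/100ms reservation.
 */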
1125 : :
1126 : : static
1127 : : int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1128 : : {
1129 : 0 : return (dl_se->runtime <= 0);
1130 : : }
1131 : :
1132 : : extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1133 : :
1134 : : /*
1135 : : * This function implements the GRUB accounting rule:
1136 : : * according to the GRUB reclaiming algorithm, the runtime is
1137 : : * not decreased as "dq = -dt", but as
1138 : : * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1139 : : * where u is the utilization of the task, Umax is the maximum reclaimable
1140 : : * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1141 : : * as the difference between the "total runqueue utilization" and the
1142 : : * runqueue active utilization, and Uextra is the (per runqueue) extra
1143 : : * reclaimable utilization.
1144 : : * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1145 : : * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1146 : : * BW_SHIFT.
1147 : : * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
1148 : : * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1149 : : * Since delta is a 64 bit variable, to have an overflow its value
1150 : : * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
1151 : : * So, overflow is not an issue here.
1152 : : */
1153 : : static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1154 : : {
1155 : 0 : u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1156 : : u64 u_act;
1157 : 0 : u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1158 : :
1159 : : /*
1160 : : * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1161 : : * we compare u_inact + rq->dl.extra_bw with
1162 : : * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1163 : : * u_inact + rq->dl.extra_bw can be larger than
1164 : : * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative,
1165 : : * leading to wrong results)
1166 : : */
1167 : 0 : if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1168 : : u_act = u_act_min;
1169 : : else
1170 : 0 : u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1171 : :
1172 : 0 : return (delta * u_act) >> BW_SHIFT;
1173 : : }
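/*
 * Editorial note (illustrative numbers, not from the original source): a
 * worked instance of the rule above, with everything expressed as fractions
 * of BW_UNIT. Assume a task with u = 0.25 running on a runqueue where
 * u_inact = 0.1, extra_bw = 0.3 and Umax = 0.95 (so u_act_min ~= 0.263):
 *
 *	u_inact + extra_bw = 0.4  <  1 - u_act_min = 0.737
 *	=> u_act = 1 - 0.1 - 0.3 = 0.6
 *
 * so for every millisecond of wall-clock execution the task's runtime is
 * decreased by only 0.6ms, letting it reclaim the bandwidth left unused by
 * the other reservations.
 */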
1174 : :
1175 : : /*
1176 : : * Update the current task's runtime statistics (provided it is still
1177 : : * a -deadline task and has not been removed from the dl_rq).
1178 : : */
1179 : 0 : static void update_curr_dl(struct rq *rq)
1180 : : {
1181 : 0 : struct task_struct *curr = rq->curr;
1182 : : struct sched_dl_entity *dl_se = &curr->dl;
1183 : : u64 delta_exec, scaled_delta_exec;
1184 : : int cpu = cpu_of(rq);
1185 : : u64 now;
1186 : :
1187 : 0 : if (!dl_task(curr) || !on_dl_rq(dl_se))
1188 : : return;
1189 : :
1190 : : /*
1191 : : * Consumed budget is computed considering the time as
1192 : : * observed by schedulable tasks (excluding time spent
1193 : : * in hardirq context, etc.). Deadlines are instead
1194 : : * computed using hard walltime. This seems to be the more
1195 : : * natural solution, but the full ramifications of this
1196 : : * approach need further study.
1197 : : */
1198 : : now = rq_clock_task(rq);
1199 : 0 : delta_exec = now - curr->se.exec_start;
1200 : 0 : if (unlikely((s64)delta_exec <= 0)) {
1201 : 0 : if (unlikely(dl_se->dl_yielded))
1202 : : goto throttle;
1203 : : return;
1204 : : }
1205 : :
1206 : 0 : schedstat_set(curr->se.statistics.exec_max,
1207 : : max(curr->se.statistics.exec_max, delta_exec));
1208 : :
1209 : 0 : curr->se.sum_exec_runtime += delta_exec;
1210 : 0 : account_group_exec_runtime(curr, delta_exec);
1211 : :
1212 : 0 : curr->se.exec_start = now;
1213 : 0 : cgroup_account_cputime(curr, delta_exec);
1214 : :
1215 : 0 : if (dl_entity_is_special(dl_se))
1216 : : return;
1217 : :
1218 : : /*
1219 : : * For tasks that participate in GRUB, we implement GRUB-PA: the
1220 : : * spare reclaimed bandwidth is used to clock down frequency.
1221 : : *
1222 : : * For the others, we still need to scale reservation parameters
1223 : : * according to current frequency and CPU maximum capacity.
1224 : : */
1225 : 0 : if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1226 : : scaled_delta_exec = grub_reclaim(delta_exec,
1227 : : rq,
1228 : : &curr->dl);
1229 : : } else {
1230 : : unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1231 : : unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1232 : :
1233 : 0 : scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1234 : 0 : scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1235 : : }
1236 : :
1237 : 0 : dl_se->runtime -= scaled_delta_exec;
1238 : :
1239 : : throttle:
1240 : 0 : if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1241 : 0 : dl_se->dl_throttled = 1;
1242 : :
1243 : : /* If requested, inform the user about runtime overruns. */
1244 : 0 : if (dl_runtime_exceeded(dl_se) &&
1245 : 0 : (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1246 : 0 : dl_se->dl_overrun = 1;
1247 : :
1248 : : __dequeue_task_dl(rq, curr, 0);
1249 : 0 : if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
1250 : 0 : enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1251 : :
1252 : 0 : if (!is_leftmost(curr, &rq->dl))
1253 : 0 : resched_curr(rq);
1254 : : }
1255 : :
1256 : : /*
1257 : : * Because -- for now -- we share the rt bandwidth, we need to
1258 : : * account our runtime there too, otherwise actual rt tasks
1259 : : * would be able to exceed the shared quota.
1260 : : *
1261 : : * Account to the root rt group for now.
1262 : : *
1263 : : * The solution we're working towards is having the RT groups scheduled
1264 : : * using deadline servers -- however there's a few nasties to figure
1265 : : * out before that can happen.
1266 : : */
1267 : 0 : if (rt_bandwidth_enabled()) {
1268 : 0 : struct rt_rq *rt_rq = &rq->rt;
1269 : :
1270 : 0 : raw_spin_lock(&rt_rq->rt_runtime_lock);
1271 : : /*
1272 : : * We'll let actual RT tasks worry about the overflow here, we
1273 : : * have our own CBS to keep us inline; only account when RT
1274 : : * bandwidth is relevant.
1275 : : */
1276 : 0 : if (sched_rt_bandwidth_account(rt_rq))
1277 : 0 : rt_rq->rt_time += delta_exec;
1278 : : raw_spin_unlock(&rt_rq->rt_runtime_lock);
1279 : : }
1280 : : }
1281 : :
1282 : 0 : static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1283 : : {
1284 : 0 : struct sched_dl_entity *dl_se = container_of(timer,
1285 : : struct sched_dl_entity,
1286 : : inactive_timer);
1287 : : struct task_struct *p = dl_task_of(dl_se);
1288 : : struct rq_flags rf;
1289 : : struct rq *rq;
1290 : :
1291 : 0 : rq = task_rq_lock(p, &rf);
1292 : :
1293 : : sched_clock_tick();
1294 : 0 : update_rq_clock(rq);
1295 : :
1296 : 0 : if (!dl_task(p) || p->state == TASK_DEAD) {
1297 : 0 : struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1298 : :
1299 : 0 : if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
1300 : 0 : sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1301 : 0 : sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1302 : 0 : dl_se->dl_non_contending = 0;
1303 : : }
1304 : :
1305 : 0 : raw_spin_lock(&dl_b->lock);
1306 : 0 : __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1307 : : raw_spin_unlock(&dl_b->lock);
1308 : : __dl_clear_params(p);
1309 : :
1310 : : goto unlock;
1311 : : }
1312 : 0 : if (dl_se->dl_non_contending == 0)
1313 : : goto unlock;
1314 : :
1315 : 0 : sub_running_bw(dl_se, &rq->dl);
1316 : 0 : dl_se->dl_non_contending = 0;
1317 : : unlock:
1318 : 0 : task_rq_unlock(rq, p, &rf);
1319 : 0 : put_task_struct(p);
1320 : :
1321 : 0 : return HRTIMER_NORESTART;
1322 : : }
1323 : :
1324 : 3 : void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1325 : : {
1326 : 3 : struct hrtimer *timer = &dl_se->inactive_timer;
1327 : :
1328 : 3 : hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1329 : 3 : timer->function = inactive_task_timer;
1330 : 3 : }
1331 : :
1332 : : #ifdef CONFIG_SMP
1333 : :
1334 : 0 : static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1335 : : {
1336 : : struct rq *rq = rq_of_dl_rq(dl_rq);
1337 : :
1338 : 0 : if (dl_rq->earliest_dl.curr == 0 ||
1339 : : dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1340 : 0 : dl_rq->earliest_dl.curr = deadline;
1341 : 0 : cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1342 : : }
1343 : 0 : }
1344 : :
1345 : 0 : static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1346 : : {
1347 : : struct rq *rq = rq_of_dl_rq(dl_rq);
1348 : :
1349 : : /*
1350 : : * Since we may have removed our earliest (and/or next earliest)
1351 : : * task we must recompute them.
1352 : : */
1353 : 0 : if (!dl_rq->dl_nr_running) {
1354 : 0 : dl_rq->earliest_dl.curr = 0;
1355 : 0 : dl_rq->earliest_dl.next = 0;
1356 : 0 : cpudl_clear(&rq->rd->cpudl, rq->cpu);
1357 : : } else {
1358 : 0 : struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1359 : : struct sched_dl_entity *entry;
1360 : :
1361 : : entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1362 : 0 : dl_rq->earliest_dl.curr = entry->deadline;
1363 : 0 : cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1364 : : }
1365 : 0 : }
1366 : :
1367 : : #else
1368 : :
1369 : : static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1370 : : static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1371 : :
1372 : : #endif /* CONFIG_SMP */
1373 : :
1374 : : static inline
1375 : 0 : void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1376 : : {
1377 : 0 : int prio = dl_task_of(dl_se)->prio;
1378 : 0 : u64 deadline = dl_se->deadline;
1379 : :
1380 : 0 : WARN_ON(!dl_prio(prio));
1381 : 0 : dl_rq->dl_nr_running++;
1382 : : add_nr_running(rq_of_dl_rq(dl_rq), 1);
1383 : :
1384 : 0 : inc_dl_deadline(dl_rq, deadline);
1385 : : inc_dl_migration(dl_se, dl_rq);
1386 : 0 : }
1387 : :
1388 : : static inline
1389 : 0 : void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1390 : : {
1391 : 0 : int prio = dl_task_of(dl_se)->prio;
1392 : :
1393 : 0 : WARN_ON(!dl_prio(prio));
1394 : 0 : WARN_ON(!dl_rq->dl_nr_running);
1395 : 0 : dl_rq->dl_nr_running--;
1396 : : sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1397 : :
1398 : 0 : dec_dl_deadline(dl_rq, dl_se->deadline);
1399 : : dec_dl_migration(dl_se, dl_rq);
1400 : 0 : }
1401 : :
1402 : 0 : static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1403 : : {
1404 : : struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1405 : 0 : struct rb_node **link = &dl_rq->root.rb_root.rb_node;
1406 : : struct rb_node *parent = NULL;
1407 : : struct sched_dl_entity *entry;
1408 : : int leftmost = 1;
1409 : :
1410 : 0 : BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1411 : :
1412 : 0 : while (*link) {
1413 : : parent = *link;
1414 : : entry = rb_entry(parent, struct sched_dl_entity, rb_node);
1415 : 0 : if (dl_time_before(dl_se->deadline, entry->deadline))
1416 : 0 : link = &parent->rb_left;
1417 : : else {
1418 : 0 : link = &parent->rb_right;
1419 : : leftmost = 0;
1420 : : }
1421 : : }
1422 : :
1423 : : rb_link_node(&dl_se->rb_node, parent, link);
1424 : : rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
1425 : :
1426 : 0 : inc_dl_tasks(dl_se, dl_rq);
1427 : 0 : }
1428 : :
1429 : 0 : static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1430 : : {
1431 : : struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1432 : :
1433 : 0 : if (RB_EMPTY_NODE(&dl_se->rb_node))
1434 : 0 : return;
1435 : :
1436 : 0 : rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1437 : 0 : RB_CLEAR_NODE(&dl_se->rb_node);
1438 : :
1439 : 0 : dec_dl_tasks(dl_se, dl_rq);
1440 : : }
1441 : :
1442 : : static void
1443 : 0 : enqueue_dl_entity(struct sched_dl_entity *dl_se,
1444 : : struct sched_dl_entity *pi_se, int flags)
1445 : : {
1446 : 0 : BUG_ON(on_dl_rq(dl_se));
1447 : :
1448 : : /*
1449 : : * If this is a wakeup or a new instance, the scheduling
1450 : : * parameters of the task might need updating. Otherwise,
1451 : : * we want a replenishment of its runtime.
1452 : : */
1453 : 0 : if (flags & ENQUEUE_WAKEUP) {
1454 : 0 : task_contending(dl_se, flags);
1455 : 0 : update_dl_entity(dl_se, pi_se);
1456 : 0 : } else if (flags & ENQUEUE_REPLENISH) {
1457 : 0 : replenish_dl_entity(dl_se, pi_se);
1458 : 0 : } else if ((flags & ENQUEUE_RESTORE) &&
1459 : 0 : dl_time_before(dl_se->deadline,
1460 : : rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1461 : 0 : setup_new_dl_entity(dl_se);
1462 : : }
1463 : :
1464 : 0 : __enqueue_dl_entity(dl_se);
1465 : 0 : }
1466 : :
1467 : : static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1468 : : {
1469 : 0 : __dequeue_dl_entity(dl_se);
1470 : : }
1471 : :
1472 : 0 : static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1473 : : {
1474 : : struct task_struct *pi_task = rt_mutex_get_top_task(p);
1475 : 0 : struct sched_dl_entity *pi_se = &p->dl;
1476 : :
1477 : : /*
1478 : : * Use the scheduling parameters of the top pi-waiter task if:
1479 : : * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
1480 : : * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
1481 : : * smaller than our deadline OR we are a !SCHED_DEADLINE task getting
1482 : : * boosted due to a SCHED_DEADLINE pi-waiter).
1483 : : * Otherwise we keep our runtime and deadline.
1484 : : */
1485 : 0 : if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
1486 : 0 : pi_se = &pi_task->dl;
1487 : 0 : } else if (!dl_prio(p->normal_prio)) {
1488 : : /*
1489 : : * Special case in which we have a !SCHED_DEADLINE task
1490 : : * that is going to be deboosted, but exceeds its
1491 : : * runtime while doing so. No point in replenishing
1492 : : * it, as it's going to return back to its original
1493 : : * scheduling class after this.
1494 : : */
1495 : 0 : BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1496 : : return;
1497 : : }
1498 : :
1499 : : /*
1500 : : * Check if a constrained deadline task was activated
1501 : : * after the deadline but before the next period.
1502 : : * If that is the case, the task will be throttled and
1503 : : * the replenishment timer will be set to the next period.
1504 : : */
1505 : 0 : if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1506 : 0 : dl_check_constrained_dl(&p->dl);
1507 : :
1508 : 0 : if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1509 : 0 : add_rq_bw(&p->dl, &rq->dl);
1510 : 0 : add_running_bw(&p->dl, &rq->dl);
1511 : : }
1512 : :
1513 : : /*
1514 : : * If p is throttled, we do not enqueue it. In fact, if it exhausted
1515 : : * its budget it needs a replenishment and, since it now is on
1516 : : * its rq, the bandwidth timer callback (which clearly has not
1517 : : * run yet) will take care of this.
1518 : : * However, the active utilization does not depend on the fact
1519 : : * that the task is on the runqueue or not (but depends on the
1520 : : * task's state - in GRUB parlance, "inactive" vs "active contending").
1521 : : * In other words, even if a task is throttled its utilization must
1522 : : * be counted in the active utilization; hence, we need to call
1523 : : * add_running_bw().
1524 : : */
1525 : 0 : if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1526 : 0 : if (flags & ENQUEUE_WAKEUP)
1527 : 0 : task_contending(&p->dl, flags);
1528 : :
1529 : : return;
1530 : : }
1531 : :
1532 : 0 : enqueue_dl_entity(&p->dl, pi_se, flags);
1533 : :
1534 : 0 : if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1535 : 0 : enqueue_pushable_dl_task(rq, p);
1536 : : }
1537 : :
1538 : : static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1539 : : {
1540 : 0 : dequeue_dl_entity(&p->dl);
1541 : 0 : dequeue_pushable_dl_task(rq, p);
1542 : : }
1543 : :
1544 : 0 : static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1545 : : {
1546 : 0 : update_curr_dl(rq);
1547 : : __dequeue_task_dl(rq, p, flags);
1548 : :
1549 : 0 : if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1550 : 0 : sub_running_bw(&p->dl, &rq->dl);
1551 : 0 : sub_rq_bw(&p->dl, &rq->dl);
1552 : : }
1553 : :
1554 : : /*
1555 : : * This check allows us to start the inactive timer (or to immediately
1556 : : * decrease the active utilization, if needed) in two cases:
1557 : : * when the task blocks and when it is terminating
1558 : : * (p->state == TASK_DEAD). We can handle the two cases in the same
1559 : : * way, because from GRUB's point of view the same thing is happening
1560 : : * (the task moves from "active contending" to "active non contending"
1561 : : * or "inactive")
1562 : : */
1563 : 0 : if (flags & DEQUEUE_SLEEP)
1564 : 0 : task_non_contending(p);
1565 : 0 : }
1566 : :
1567 : : /*
1568 : : * Yield task semantic for -deadline tasks is:
1569 : : *
1570 : : * get off the CPU until our next instance arrives, with
1571 : : * a new runtime. This is of little use now, since we
1572 : : * don't have a bandwidth reclaiming mechanism yet. Once
1573 : : * bandwidth reclaiming is in place, yield_task_dl will
1574 : : * indicate that some spare budget is available for other
1575 : : * task instances to use.
1576 : : */
1577 : 0 : static void yield_task_dl(struct rq *rq)
1578 : : {
1579 : : /*
1580 : : * We make the task go to sleep until its current deadline by
1581 : : * forcing its runtime to zero. This way, update_curr_dl() stops
1582 : : * it and the bandwidth timer will wake it up and will give it
1583 : : * new scheduling parameters (thanks to dl_yielded=1).
1584 : : */
1585 : 0 : rq->curr->dl.dl_yielded = 1;
1586 : :
1587 : 0 : update_rq_clock(rq);
1588 : 0 : update_curr_dl(rq);
1589 : : /*
1590 : : * Tell update_rq_clock() that we've just updated,
1591 : : * so we don't do microscopic update in schedule()
1592 : : * and double the fastpath cost.
1593 : : */
1594 : : rq_clock_skip_update(rq);
1595 : 0 : }
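As a companion to the yield semantic described above, here is a minimal userspace sketch (not part of this file) of a periodic SCHED_DEADLINE job that hands its unused budget back with sched_yield(). The attribute struct mirrors the sched_setattr(2) ABI; the 10ms/100ms reservation, the dl_sketch_attr name and the do_work() placeholder are illustrative assumptions, not kernel code.

#define _GNU_SOURCE
#include <stdint.h>
#include <unistd.h>
#include <sched.h>
#include <sys/syscall.h>

/* Local copy of the sched_setattr(2) attribute layout. */
struct dl_sketch_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* worst-case runtime per period, ns */
	uint64_t sched_deadline;	/* relative deadline, ns */
	uint64_t sched_period;		/* period, ns */
};

#define SK_SCHED_DEADLINE 6		/* policy value from the uapi headers */

static void do_work(void) { /* placeholder for the periodic job body */ }

int main(void)
{
	struct dl_sketch_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SK_SCHED_DEADLINE,
		.sched_runtime	= 10ULL * 1000 * 1000,	/* 10 ms */
		.sched_deadline	= 100ULL * 1000 * 1000,	/* 100 ms */
		.sched_period	= 100ULL * 1000 * 1000,	/* 100 ms */
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		return 1;

	for (;;) {
		do_work();
		/*
		 * Hand the unused budget back: dl_yielded is set and the
		 * task sleeps until the replenishment timer gives it new
		 * parameters for the next period.
		 */
		sched_yield();
	}
}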
1596 : :
1597 : : #ifdef CONFIG_SMP
1598 : :
1599 : : static int find_later_rq(struct task_struct *task);
1600 : :
1601 : : static int
1602 : 0 : select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1603 : : {
1604 : : struct task_struct *curr;
1605 : : struct rq *rq;
1606 : :
1607 : 0 : if (sd_flag != SD_BALANCE_WAKE)
1608 : : goto out;
1609 : :
1610 : 0 : rq = cpu_rq(cpu);
1611 : :
1612 : : rcu_read_lock();
1613 : 0 : curr = READ_ONCE(rq->curr); /* unlocked access */
1614 : :
1615 : : /*
1616 : : * If we are dealing with a -deadline task, we must
1617 : : * decide where to wake it up.
1618 : : * If it has a later deadline and the current task
1619 : : * on this rq can't move (provided the waking task
1620 : : * can!) we prefer to send it somewhere else. On the
1621 : : * other hand, if it has a shorter deadline, we
1622 : : * try to make it stay here, as it might be important.
1623 : : */
1624 : 0 : if (unlikely(dl_task(curr)) &&
1625 : 0 : (curr->nr_cpus_allowed < 2 ||
1626 : 0 : !dl_entity_preempt(&p->dl, &curr->dl)) &&
1627 : 0 : (p->nr_cpus_allowed > 1)) {
1628 : 0 : int target = find_later_rq(p);
1629 : :
1630 : 0 : if (target != -1 &&
1631 : 0 : (dl_time_before(p->dl.deadline,
1632 : 0 : cpu_rq(target)->dl.earliest_dl.curr) ||
1633 : 0 : (cpu_rq(target)->dl.dl_nr_running == 0)))
1634 : : cpu = target;
1635 : : }
1636 : : rcu_read_unlock();
1637 : :
1638 : : out:
1639 : 0 : return cpu;
1640 : : }
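All of the earlier/later deadline decisions above reduce to the wrap-safe comparison used throughout this file, dl_time_before() (dl_entity_preempt() layers the special/boosted-entity case on top of it). A minimal standalone sketch of that comparison, assuming 64-bit nanosecond deadlines:

#include <stdbool.h>
#include <stdint.h>

/* True if deadline a is earlier than b, even across u64 wrap-around. */
static bool sk_dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

The signed subtraction stays correct as long as the two values are less than 2^63 ns apart, which is why __checkparam_dl() further below rejects user parameters with the MSB set.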
1641 : :
1642 : 0 : static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1643 : : {
1644 : : struct rq *rq;
1645 : :
1646 : 0 : if (p->state != TASK_WAKING)
1647 : 0 : return;
1648 : :
1649 : 0 : rq = task_rq(p);
1650 : : /*
1651 : : * Since p->state == TASK_WAKING, set_task_cpu() has been called
1652 : : * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1653 : : * rq->lock is not... So, lock it.
1654 : : */
1655 : 0 : raw_spin_lock(&rq->lock);
1656 : 0 : if (p->dl.dl_non_contending) {
1657 : 0 : sub_running_bw(&p->dl, &rq->dl);
1658 : 0 : p->dl.dl_non_contending = 0;
1659 : : /*
1660 : : * If the timer handler is currently running and the
1661 : : * timer cannot be cancelled, inactive_task_timer()
1662 : : * will see that dl_non_contending is not set, and
1663 : : * will not touch the rq's active utilization,
1664 : : * so we are still safe.
1665 : : */
1666 : 0 : if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1667 : 0 : put_task_struct(p);
1668 : : }
1669 : 0 : sub_rq_bw(&p->dl, &rq->dl);
1670 : : raw_spin_unlock(&rq->lock);
1671 : : }
1672 : :
1673 : 0 : static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1674 : : {
1675 : : /*
1676 : : * Current can't be migrated, useless to reschedule,
1677 : : * let's hope p can move out.
1678 : : */
1679 : 0 : if (rq->curr->nr_cpus_allowed == 1 ||
1680 : 0 : !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1681 : : return;
1682 : :
1683 : : /*
1684 : : * p is migratable, so let's not schedule it and
1685 : : * see if it is pushed or pulled somewhere else.
1686 : : */
1687 : 0 : if (p->nr_cpus_allowed != 1 &&
1688 : 0 : cpudl_find(&rq->rd->cpudl, p, NULL))
1689 : : return;
1690 : :
1691 : 0 : resched_curr(rq);
1692 : : }
1693 : :
1694 : 3 : static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1695 : : {
1696 : 3 : if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1697 : : /*
1698 : : * This is OK, because current is on_cpu, which avoids it being
1699 : : * picked for load-balance; preemption/IRQs are still
1700 : : * disabled, avoiding further scheduler activity on it; and we've
1701 : : * not yet started the picking loop.
1702 : : */
1703 : : rq_unpin_lock(rq, rf);
1704 : 0 : pull_dl_task(rq);
1705 : : rq_repin_lock(rq, rf);
1706 : : }
1707 : :
1708 : 3 : return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1709 : : }
1710 : : #endif /* CONFIG_SMP */
1711 : :
1712 : : /*
1713 : : * Only called when both the current and waking task are -deadline
1714 : : * tasks.
1715 : : */
1716 : 0 : static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1717 : : int flags)
1718 : : {
1719 : 0 : if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1720 : 0 : resched_curr(rq);
1721 : 0 : return;
1722 : : }
1723 : :
1724 : : #ifdef CONFIG_SMP
1725 : : /*
1726 : : * In the unlikely case current and p have the same deadline
1727 : : * let us try to decide what's the best thing to do...
1728 : : */
1729 : 0 : if ((p->dl.deadline == rq->curr->dl.deadline) &&
1730 : : !test_tsk_need_resched(rq->curr))
1731 : 0 : check_preempt_equal_dl(rq, p);
1732 : : #endif /* CONFIG_SMP */
1733 : : }
1734 : :
1735 : : #ifdef CONFIG_SCHED_HRTICK
1736 : : static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1737 : : {
1738 : 0 : hrtick_start(rq, p->dl.runtime);
1739 : : }
1740 : : #else /* !CONFIG_SCHED_HRTICK */
1741 : : static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1742 : : {
1743 : : }
1744 : : #endif
1745 : :
1746 : 0 : static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
1747 : : {
1748 : 0 : p->se.exec_start = rq_clock_task(rq);
1749 : :
1750 : : /* You can't push away the running task */
1751 : 0 : dequeue_pushable_dl_task(rq, p);
1752 : :
1753 : 0 : if (!first)
1754 : 0 : return;
1755 : :
1756 : 0 : if (hrtick_enabled(rq))
1757 : : start_hrtick_dl(rq, p);
1758 : :
1759 : 0 : if (rq->curr->sched_class != &dl_sched_class)
1760 : 0 : update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1761 : :
1762 : : deadline_queue_push_tasks(rq);
1763 : : }
1764 : :
1765 : : static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1766 : : struct dl_rq *dl_rq)
1767 : : {
1768 : 0 : struct rb_node *left = rb_first_cached(&dl_rq->root);
1769 : :
1770 : 0 : if (!left)
1771 : : return NULL;
1772 : :
1773 : : return rb_entry(left, struct sched_dl_entity, rb_node);
1774 : : }
1775 : :
1776 : : static struct task_struct *
1777 : 3 : pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1778 : : {
1779 : : struct sched_dl_entity *dl_se;
1780 : : struct dl_rq *dl_rq = &rq->dl;
1781 : : struct task_struct *p;
1782 : :
1783 : 3 : WARN_ON_ONCE(prev || rf);
1784 : :
1785 : 3 : if (!sched_dl_runnable(rq))
1786 : : return NULL;
1787 : :
1788 : : dl_se = pick_next_dl_entity(rq, dl_rq);
1789 : 0 : BUG_ON(!dl_se);
1790 : : p = dl_task_of(dl_se);
1791 : 0 : set_next_task_dl(rq, p, true);
1792 : 0 : return p;
1793 : : }
1794 : :
1795 : 0 : static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1796 : : {
1797 : 0 : update_curr_dl(rq);
1798 : :
1799 : 0 : update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1800 : 0 : if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1801 : 0 : enqueue_pushable_dl_task(rq, p);
1802 : 0 : }
1803 : :
1804 : : /*
1805 : : * scheduler tick hitting a task of our scheduling class.
1806 : : *
1807 : : * NOTE: This function can be called remotely by the tick offload that
1808 : : * goes along full dynticks. Therefore no local assumption can be made
1809 : : * and everything must be accessed through the @rq and @curr passed in
1810 : : * parameters.
1811 : : */
1812 : 0 : static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1813 : : {
1814 : 0 : update_curr_dl(rq);
1815 : :
1816 : 0 : update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1817 : : /*
1818 : : * Even when we have runtime, update_curr_dl() might have resulted in us
1819 : : * not being the leftmost task anymore. In that case NEED_RESCHED will
1820 : : * be set and schedule() will start a new hrtick for the next task.
1821 : : */
1822 : 0 : if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1823 : : is_leftmost(p, &rq->dl))
1824 : : start_hrtick_dl(rq, p);
1825 : 0 : }
1826 : :
1827 : 0 : static void task_fork_dl(struct task_struct *p)
1828 : : {
1829 : : /*
1830 : : * Fork is not allowed for SCHED_DEADLINE tasks; this is enforced
1831 : : * in sched_fork(), so there is nothing to do here.
1832 : : */
1833 : 0 : }
1834 : :
1835 : : #ifdef CONFIG_SMP
1836 : :
1837 : : /* Only try algorithms three times */
1838 : : #define DL_MAX_TRIES 3
1839 : :
1840 : : static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1841 : : {
1842 : 0 : if (!task_running(rq, p) &&
1843 : 0 : cpumask_test_cpu(cpu, p->cpus_ptr))
1844 : : return 1;
1845 : : return 0;
1846 : : }
1847 : :
1848 : : /*
1849 : : * Return the earliest pushable rq's task, which is suitable to be executed
1850 : : * on the CPU, NULL otherwise:
1851 : : */
1852 : 0 : static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1853 : : {
1854 : 0 : struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1855 : : struct task_struct *p = NULL;
1856 : :
1857 : 0 : if (!has_pushable_dl_tasks(rq))
1858 : : return NULL;
1859 : :
1860 : : next_node:
1861 : 0 : if (next_node) {
1862 : 0 : p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1863 : :
1864 : 0 : if (pick_dl_task(rq, p, cpu))
1865 : 0 : return p;
1866 : :
1867 : 0 : next_node = rb_next(next_node);
1868 : 0 : goto next_node;
1869 : : }
1870 : :
1871 : : return NULL;
1872 : : }
1873 : :
1874 : : static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1875 : :
1876 : 0 : static int find_later_rq(struct task_struct *task)
1877 : : {
1878 : : struct sched_domain *sd;
1879 : 0 : struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1880 : 0 : int this_cpu = smp_processor_id();
1881 : 0 : int cpu = task_cpu(task);
1882 : :
1883 : : /* Make sure the mask is initialized first */
1884 : 0 : if (unlikely(!later_mask))
1885 : : return -1;
1886 : :
1887 : 0 : if (task->nr_cpus_allowed == 1)
1888 : : return -1;
1889 : :
1890 : : /*
1891 : : * We have to consider system topology and task affinity
1892 : : * first, then we can look for a suitable CPU.
1893 : : */
1894 : 0 : if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1895 : : return -1;
1896 : :
1897 : : /*
1898 : : * If we are here, some targets have been found, including
1899 : : * the most suitable one: among the runqueues whose current
1900 : : * tasks have later deadlines than ours, the rq with the
1901 : : * latest possible one.
1902 : : *
1903 : : * Now we check how well this matches with the task's
1904 : : * affinity and the system topology.
1905 : : *
1906 : : * The last CPU where the task ran is our first
1907 : : * guess, since it is most likely cache-hot there.
1908 : : */
1909 : 0 : if (cpumask_test_cpu(cpu, later_mask))
1910 : : return cpu;
1911 : : /*
1912 : : * Check if this_cpu is to be skipped (i.e., it is
1913 : : * not in the mask) or not.
1914 : : */
1915 : 0 : if (!cpumask_test_cpu(this_cpu, later_mask))
1916 : : this_cpu = -1;
1917 : :
1918 : : rcu_read_lock();
1919 : 0 : for_each_domain(cpu, sd) {
1920 : 0 : if (sd->flags & SD_WAKE_AFFINE) {
1921 : : int best_cpu;
1922 : :
1923 : : /*
1924 : : * If possible, preempting this_cpu is
1925 : : * cheaper than migrating.
1926 : : */
1927 : 0 : if (this_cpu != -1 &&
1928 : : cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1929 : : rcu_read_unlock();
1930 : 0 : return this_cpu;
1931 : : }
1932 : :
1933 : 0 : best_cpu = cpumask_first_and(later_mask,
1934 : : sched_domain_span(sd));
1935 : : /*
1936 : : * Last chance: if a CPU in both later_mask
1937 : : * and the current sd span is valid, that becomes our
1938 : : * choice. Of course, the latest possible CPU is
1939 : : * already under consideration through later_mask.
1940 : : */
1941 : 0 : if (best_cpu < nr_cpu_ids) {
1942 : : rcu_read_unlock();
1943 : 0 : return best_cpu;
1944 : : }
1945 : : }
1946 : : }
1947 : : rcu_read_unlock();
1948 : :
1949 : : /*
1950 : : * At this point, all our guesses failed, we just return
1951 : : * 'something', and let the caller sort the things out.
1952 : : */
1953 : 0 : if (this_cpu != -1)
1954 : : return this_cpu;
1955 : :
1956 : : cpu = cpumask_any(later_mask);
1957 : 0 : if (cpu < nr_cpu_ids)
1958 : 0 : return cpu;
1959 : :
1960 : : return -1;
1961 : : }
1962 : :
1963 : : /* Locks the rq it finds */
1964 : 0 : static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1965 : : {
1966 : : struct rq *later_rq = NULL;
1967 : : int tries;
1968 : : int cpu;
1969 : :
1970 : 0 : for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1971 : 0 : cpu = find_later_rq(task);
1972 : :
1973 : 0 : if ((cpu == -1) || (cpu == rq->cpu))
1974 : : break;
1975 : :
1976 : 0 : later_rq = cpu_rq(cpu);
1977 : :
1978 : 0 : if (later_rq->dl.dl_nr_running &&
1979 : 0 : !dl_time_before(task->dl.deadline,
1980 : : later_rq->dl.earliest_dl.curr)) {
1981 : : /*
1982 : : * Target rq has tasks of equal or earlier deadline;
1983 : : * retrying does not release any lock and is unlikely
1984 : : * to yield a different result.
1985 : : */
1986 : : later_rq = NULL;
1987 : : break;
1988 : : }
1989 : :
1990 : : /* Retry if something changed. */
1991 : 0 : if (double_lock_balance(rq, later_rq)) {
1992 : 0 : if (unlikely(task_rq(task) != rq ||
1993 : : !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
1994 : : task_running(rq, task) ||
1995 : : !dl_task(task) ||
1996 : : !task_on_rq_queued(task))) {
1997 : : double_unlock_balance(rq, later_rq);
1998 : : later_rq = NULL;
1999 : 0 : break;
2000 : : }
2001 : : }
2002 : :
2003 : : /*
2004 : : * If the rq we found has no -deadline task, or
2005 : : * its earliest one has a later deadline than our
2006 : : * task, the rq is a good one.
2007 : : */
2008 : 0 : if (!later_rq->dl.dl_nr_running ||
2009 : 0 : dl_time_before(task->dl.deadline,
2010 : : later_rq->dl.earliest_dl.curr))
2011 : : break;
2012 : :
2013 : : /* Otherwise we try again. */
2014 : : double_unlock_balance(rq, later_rq);
2015 : : later_rq = NULL;
2016 : : }
2017 : :
2018 : 0 : return later_rq;
2019 : : }
2020 : :
2021 : 0 : static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2022 : : {
2023 : : struct task_struct *p;
2024 : :
2025 : 0 : if (!has_pushable_dl_tasks(rq))
2026 : : return NULL;
2027 : :
2028 : 0 : p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2029 : : struct task_struct, pushable_dl_tasks);
2030 : :
2031 : 0 : BUG_ON(rq->cpu != task_cpu(p));
2032 : 0 : BUG_ON(task_current(rq, p));
2033 : 0 : BUG_ON(p->nr_cpus_allowed <= 1);
2034 : :
2035 : 0 : BUG_ON(!task_on_rq_queued(p));
2036 : 0 : BUG_ON(!dl_task(p));
2037 : :
2038 : : return p;
2039 : : }
2040 : :
2041 : : /*
2042 : : * See if the non-running -deadline tasks on this rq
2043 : : * can be sent to some other CPU where they can preempt
2044 : : * and start executing.
2045 : : */
2046 : 0 : static int push_dl_task(struct rq *rq)
2047 : : {
2048 : : struct task_struct *next_task;
2049 : : struct rq *later_rq;
2050 : : int ret = 0;
2051 : :
2052 : 0 : if (!rq->dl.overloaded)
2053 : : return 0;
2054 : :
2055 : 0 : next_task = pick_next_pushable_dl_task(rq);
2056 : 0 : if (!next_task)
2057 : : return 0;
2058 : :
2059 : : retry:
2060 : 0 : if (WARN_ON(next_task == rq->curr))
2061 : : return 0;
2062 : :
2063 : : /*
2064 : : * If next_task preempts rq->curr, and rq->curr
2065 : : * can move away, it makes sense to just reschedule
2066 : : * without going further in pushing next_task.
2067 : : */
2068 : 0 : if (dl_task(rq->curr) &&
2069 : 0 : dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2070 : 0 : rq->curr->nr_cpus_allowed > 1) {
2071 : 0 : resched_curr(rq);
2072 : 0 : return 0;
2073 : : }
2074 : :
2075 : : /* We might release rq lock */
2076 : : get_task_struct(next_task);
2077 : :
2078 : : /* Will lock the rq it'll find */
2079 : 0 : later_rq = find_lock_later_rq(next_task, rq);
2080 : 0 : if (!later_rq) {
2081 : : struct task_struct *task;
2082 : :
2083 : : /*
2084 : : * We must check all this again, since
2085 : : * find_lock_later_rq releases rq->lock and it is
2086 : : * then possible that next_task has migrated.
2087 : : */
2088 : 0 : task = pick_next_pushable_dl_task(rq);
2089 : 0 : if (task == next_task) {
2090 : : /*
2091 : : * The task is still there. We don't try
2092 : : * again; some other CPU will pull it when ready.
2093 : : */
2094 : : goto out;
2095 : : }
2096 : :
2097 : 0 : if (!task)
2098 : : /* No more tasks */
2099 : : goto out;
2100 : :
2101 : 0 : put_task_struct(next_task);
2102 : : next_task = task;
2103 : 0 : goto retry;
2104 : : }
2105 : :
2106 : 0 : deactivate_task(rq, next_task, 0);
2107 : 0 : set_task_cpu(next_task, later_rq->cpu);
2108 : :
2109 : : /*
2110 : : * Update the later_rq clock here, because the clock is used
2111 : : * by the cpufreq_update_util() inside __add_running_bw().
2112 : : */
2113 : 0 : update_rq_clock(later_rq);
2114 : 0 : activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2115 : : ret = 1;
2116 : :
2117 : 0 : resched_curr(later_rq);
2118 : :
2119 : : double_unlock_balance(rq, later_rq);
2120 : :
2121 : : out:
2122 : 0 : put_task_struct(next_task);
2123 : :
2124 : 0 : return ret;
2125 : : }
2126 : :
2127 : 0 : static void push_dl_tasks(struct rq *rq)
2128 : : {
2129 : : /* push_dl_task() will return true if it moved a -deadline task */
2130 : 0 : while (push_dl_task(rq))
2131 : : ;
2132 : 0 : }
2133 : :
2134 : 0 : static void pull_dl_task(struct rq *this_rq)
2135 : : {
2136 : 0 : int this_cpu = this_rq->cpu, cpu;
2137 : : struct task_struct *p;
2138 : : bool resched = false;
2139 : : struct rq *src_rq;
2140 : : u64 dmin = LONG_MAX;
2141 : :
2142 : 0 : if (likely(!dl_overloaded(this_rq)))
2143 : 0 : return;
2144 : :
2145 : : /*
2146 : : * Match the barrier from dl_set_overloaded; this guarantees that if we
2147 : : * see overloaded we must also see the dlo_mask bit.
2148 : : */
2149 : 0 : smp_rmb();
2150 : :
2151 : 0 : for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2152 : 0 : if (this_cpu == cpu)
2153 : 0 : continue;
2154 : :
2155 : 0 : src_rq = cpu_rq(cpu);
2156 : :
2157 : : /*
2158 : : * It looks racy, and it is! However, as in sched_rt.c,
2159 : : * we are fine with this.
2160 : : */
2161 : 0 : if (this_rq->dl.dl_nr_running &&
2162 : 0 : dl_time_before(this_rq->dl.earliest_dl.curr,
2163 : : src_rq->dl.earliest_dl.next))
2164 : 0 : continue;
2165 : :
2166 : : /* Might drop this_rq->lock */
2167 : 0 : double_lock_balance(this_rq, src_rq);
2168 : :
2169 : : /*
2170 : : * If there are no more pullable tasks on the
2171 : : * rq, we're done with it.
2172 : : */
2173 : 0 : if (src_rq->dl.dl_nr_running <= 1)
2174 : : goto skip;
2175 : :
2176 : 0 : p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2177 : :
2178 : : /*
2179 : : * We found a task to be pulled if:
2180 : : * - it preempts our current (if there's one),
2181 : : * - it will preempt the last one we pulled (if any).
2182 : : */
2183 : 0 : if (p && dl_time_before(p->dl.deadline, dmin) &&
2184 : 0 : (!this_rq->dl.dl_nr_running ||
2185 : 0 : dl_time_before(p->dl.deadline,
2186 : : this_rq->dl.earliest_dl.curr))) {
2187 : 0 : WARN_ON(p == src_rq->curr);
2188 : 0 : WARN_ON(!task_on_rq_queued(p));
2189 : :
2190 : : /*
2191 : : * Then we pull iff p has actually an earlier
2192 : : * deadline than the current task of its runqueue.
2193 : : */
2194 : 0 : if (dl_time_before(p->dl.deadline,
2195 : 0 : src_rq->curr->dl.deadline))
2196 : : goto skip;
2197 : :
2198 : : resched = true;
2199 : :
2200 : 0 : deactivate_task(src_rq, p, 0);
2201 : 0 : set_task_cpu(p, this_cpu);
2202 : 0 : activate_task(this_rq, p, 0);
2203 : 0 : dmin = p->dl.deadline;
2204 : :
2205 : : /* Is there any other task even earlier? */
2206 : : }
2207 : : skip:
2208 : : double_unlock_balance(this_rq, src_rq);
2209 : : }
2210 : :
2211 : 0 : if (resched)
2212 : 0 : resched_curr(this_rq);
2213 : : }
2214 : :
2215 : : /*
2216 : : * Since the task is not running and a reschedule is not going to happen
2217 : : * anytime soon on its runqueue, we try pushing it away now.
2218 : : */
2219 : 0 : static void task_woken_dl(struct rq *rq, struct task_struct *p)
2220 : : {
2221 : 0 : if (!task_running(rq, p) &&
2222 : 0 : !test_tsk_need_resched(rq->curr) &&
2223 : 0 : p->nr_cpus_allowed > 1 &&
2224 : 0 : dl_task(rq->curr) &&
2225 : 0 : (rq->curr->nr_cpus_allowed < 2 ||
2226 : : !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2227 : : push_dl_tasks(rq);
2228 : : }
2229 : 0 : }
2230 : :
2231 : 0 : static void set_cpus_allowed_dl(struct task_struct *p,
2232 : : const struct cpumask *new_mask)
2233 : : {
2234 : : struct root_domain *src_rd;
2235 : : struct rq *rq;
2236 : :
2237 : 0 : BUG_ON(!dl_task(p));
2238 : :
2239 : 0 : rq = task_rq(p);
2240 : 0 : src_rd = rq->rd;
2241 : : /*
2242 : : * Migrating a SCHED_DEADLINE task between exclusive
2243 : : * cpusets (different root_domains) entails a bandwidth
2244 : : * update. We already made space for us in the destination
2245 : : * domain (see cpuset_can_attach()).
2246 : : */
2247 : 0 : if (!cpumask_intersects(src_rd->span, new_mask)) {
2248 : : struct dl_bw *src_dl_b;
2249 : :
2250 : : src_dl_b = dl_bw_of(cpu_of(rq));
2251 : : /*
2252 : : * We now free resources of the root_domain we are migrating
2253 : : * off. In the worst case, sched_setattr() may temporarily fail
2254 : : * until we complete the update.
2255 : : */
2256 : 0 : raw_spin_lock(&src_dl_b->lock);
2257 : 0 : __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2258 : : raw_spin_unlock(&src_dl_b->lock);
2259 : : }
2260 : :
2261 : 0 : set_cpus_allowed_common(p, new_mask);
2262 : 0 : }
2263 : :
2264 : : /* Assumes rq->lock is held */
2265 : 3 : static void rq_online_dl(struct rq *rq)
2266 : : {
2267 : 3 : if (rq->dl.overloaded)
2268 : 0 : dl_set_overload(rq);
2269 : :
2270 : 3 : cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2271 : 3 : if (rq->dl.dl_nr_running > 0)
2272 : 0 : cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2273 : 3 : }
2274 : :
2275 : : /* Assumes rq->lock is held */
2276 : 3 : static void rq_offline_dl(struct rq *rq)
2277 : : {
2278 : 3 : if (rq->dl.overloaded)
2279 : 0 : dl_clear_overload(rq);
2280 : :
2281 : 3 : cpudl_clear(&rq->rd->cpudl, rq->cpu);
2282 : 3 : cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2283 : 3 : }
2284 : :
2285 : 3 : void __init init_sched_dl_class(void)
2286 : : {
2287 : : unsigned int i;
2288 : :
2289 : 3 : for_each_possible_cpu(i)
2290 : 3 : zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2291 : : GFP_KERNEL, cpu_to_node(i));
2292 : 3 : }
2293 : :
2294 : 0 : void dl_add_task_root_domain(struct task_struct *p)
2295 : : {
2296 : : struct rq_flags rf;
2297 : : struct rq *rq;
2298 : : struct dl_bw *dl_b;
2299 : :
2300 : 0 : rq = task_rq_lock(p, &rf);
2301 : 0 : if (!dl_task(p))
2302 : : goto unlock;
2303 : :
2304 : 0 : dl_b = &rq->rd->dl_bw;
2305 : 0 : raw_spin_lock(&dl_b->lock);
2306 : :
2307 : 0 : __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2308 : :
2309 : : raw_spin_unlock(&dl_b->lock);
2310 : :
2311 : : unlock:
2312 : 0 : task_rq_unlock(rq, p, &rf);
2313 : 0 : }
2314 : :
2315 : 0 : void dl_clear_root_domain(struct root_domain *rd)
2316 : : {
2317 : : unsigned long flags;
2318 : :
2319 : 0 : raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2320 : 0 : rd->dl_bw.total_bw = 0;
2321 : 0 : raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2322 : 0 : }
2323 : :
2324 : : #endif /* CONFIG_SMP */
2325 : :
2326 : 0 : static void switched_from_dl(struct rq *rq, struct task_struct *p)
2327 : : {
2328 : : /*
2329 : : * task_non_contending() can start the "inactive timer" (if the 0-lag
2330 : : * time is in the future). If the task switches back to dl before
2331 : : * the "inactive timer" fires, it can continue to consume its current
2332 : : * runtime using its current deadline. If it stays outside of
2333 : : * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2334 : : * will reset the task parameters.
2335 : : */
2336 : 0 : if (task_on_rq_queued(p) && p->dl.dl_runtime)
2337 : 0 : task_non_contending(p);
2338 : :
2339 : 0 : if (!task_on_rq_queued(p)) {
2340 : : /*
2341 : : * Inactive timer is armed. However, p is leaving DEADLINE and
2342 : : * might migrate away from this rq while continuing to run on
2343 : : * some other class. We need to remove its contribution from
2344 : : * this rq's running_bw now, or sub_rq_bw() (below) will complain.
2345 : : */
2346 : 0 : if (p->dl.dl_non_contending)
2347 : 0 : sub_running_bw(&p->dl, &rq->dl);
2348 : 0 : sub_rq_bw(&p->dl, &rq->dl);
2349 : : }
2350 : :
2351 : : /*
2352 : : * We cannot use inactive_task_timer() to invoke sub_running_bw()
2353 : : * at the 0-lag time, because the task could have been migrated
2354 : : * in the meanwhile, while running as SCHED_OTHER.
2355 : : */
2356 : 0 : if (p->dl.dl_non_contending)
2357 : 0 : p->dl.dl_non_contending = 0;
2358 : :
2359 : : /*
2360 : : * Since this might be the only -deadline task on the rq,
2361 : : * this is the right place to try to pull some other one
2362 : : * from an overloaded CPU, if any.
2363 : : */
2364 : 0 : if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2365 : 0 : return;
2366 : :
2367 : : deadline_queue_pull_task(rq);
2368 : : }
2369 : :
2370 : : /*
2371 : : * When switching to -deadline, we may overload the rq, then
2372 : : * we try to push someone off, if possible.
2373 : : */
2374 : 0 : static void switched_to_dl(struct rq *rq, struct task_struct *p)
2375 : : {
2376 : 0 : if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2377 : 0 : put_task_struct(p);
2378 : :
2379 : : /* If p is not queued we will update its parameters at next wakeup. */
2380 : 0 : if (!task_on_rq_queued(p)) {
2381 : 0 : add_rq_bw(&p->dl, &rq->dl);
2382 : :
2383 : 0 : return;
2384 : : }
2385 : :
2386 : 0 : if (rq->curr != p) {
2387 : : #ifdef CONFIG_SMP
2388 : 0 : if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2389 : : deadline_queue_push_tasks(rq);
2390 : : #endif
2391 : 0 : if (dl_task(rq->curr))
2392 : 0 : check_preempt_curr_dl(rq, p, 0);
2393 : : else
2394 : 0 : resched_curr(rq);
2395 : : }
2396 : : }
2397 : :
2398 : : /*
2399 : : * If the scheduling parameters of a -deadline task changed,
2400 : : * a push or pull operation might be needed.
2401 : : */
2402 : 0 : static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2403 : : int oldprio)
2404 : : {
2405 : 0 : if (task_on_rq_queued(p) || rq->curr == p) {
2406 : : #ifdef CONFIG_SMP
2407 : : /*
2408 : : * This might be too much, but unfortunately
2409 : : * we don't have the old deadline value, and
2410 : : * we can't tell whether the task is raising
2411 : : * or lowering its prio, so...
2412 : : */
2413 : 0 : if (!rq->dl.overloaded)
2414 : : deadline_queue_pull_task(rq);
2415 : :
2416 : : /*
2417 : : * If we now have an earlier deadline task than p,
2418 : : * then reschedule, provided p is still on this
2419 : : * runqueue.
2420 : : */
2421 : 0 : if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2422 : 0 : resched_curr(rq);
2423 : : #else
2424 : : /*
2425 : : * Again, we don't know if p has an earlier
2426 : : * or later deadline, so let's blindly set a
2427 : : * (maybe not needed) rescheduling point.
2428 : : */
2429 : : resched_curr(rq);
2430 : : #endif /* CONFIG_SMP */
2431 : : }
2432 : 0 : }
2433 : :
2434 : : const struct sched_class dl_sched_class = {
2435 : : .next = &rt_sched_class,
2436 : : .enqueue_task = enqueue_task_dl,
2437 : : .dequeue_task = dequeue_task_dl,
2438 : : .yield_task = yield_task_dl,
2439 : :
2440 : : .check_preempt_curr = check_preempt_curr_dl,
2441 : :
2442 : : .pick_next_task = pick_next_task_dl,
2443 : : .put_prev_task = put_prev_task_dl,
2444 : : .set_next_task = set_next_task_dl,
2445 : :
2446 : : #ifdef CONFIG_SMP
2447 : : .balance = balance_dl,
2448 : : .select_task_rq = select_task_rq_dl,
2449 : : .migrate_task_rq = migrate_task_rq_dl,
2450 : : .set_cpus_allowed = set_cpus_allowed_dl,
2451 : : .rq_online = rq_online_dl,
2452 : : .rq_offline = rq_offline_dl,
2453 : : .task_woken = task_woken_dl,
2454 : : #endif
2455 : :
2456 : : .task_tick = task_tick_dl,
2457 : : .task_fork = task_fork_dl,
2458 : :
2459 : : .prio_changed = prio_changed_dl,
2460 : : .switched_from = switched_from_dl,
2461 : : .switched_to = switched_to_dl,
2462 : :
2463 : : .update_curr = update_curr_dl,
2464 : : };
2465 : :
2466 : 0 : int sched_dl_global_validate(void)
2467 : : {
2468 : : u64 runtime = global_rt_runtime();
2469 : : u64 period = global_rt_period();
2470 : 0 : u64 new_bw = to_ratio(period, runtime);
2471 : : struct dl_bw *dl_b;
2472 : : int cpu, ret = 0;
2473 : : unsigned long flags;
2474 : :
2475 : : /*
2476 : : * Here we want to make sure the bandwidth is not being set to a
2477 : : * value smaller than the bandwidth currently allocated in
2478 : : * any of the root_domains.
2479 : : *
2480 : : * FIXME: Cycling over all the CPUs is overkill, but simpler than
2481 : : * cycling on root_domains... Discussion on different/better
2482 : : * solutions is welcome!
2483 : : */
2484 : 0 : for_each_possible_cpu(cpu) {
2485 : : rcu_read_lock_sched();
2486 : : dl_b = dl_bw_of(cpu);
2487 : :
2488 : 0 : raw_spin_lock_irqsave(&dl_b->lock, flags);
2489 : 0 : if (new_bw < dl_b->total_bw)
2490 : : ret = -EBUSY;
2491 : 0 : raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2492 : :
2493 : : rcu_read_unlock_sched();
2494 : :
2495 : 0 : if (ret)
2496 : : break;
2497 : : }
2498 : :
2499 : 0 : return ret;
2500 : : }
2501 : :
2502 : 3 : void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2503 : : {
2504 : 3 : if (global_rt_runtime() == RUNTIME_INF) {
2505 : 0 : dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2506 : 0 : dl_rq->extra_bw = 1 << BW_SHIFT;
2507 : : } else {
2508 : 3 : dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2509 : 3 : global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2510 : 3 : dl_rq->extra_bw = to_ratio(global_rt_period(),
2511 : : global_rt_runtime());
2512 : : }
2513 : 3 : }
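To make the fixed-point math above concrete, the standalone sketch below recomputes extra_bw and bw_ratio for the default rt sysctls (950000us of runtime every 1000000us). The BW_SHIFT/RATIO_SHIFT values of 20 and 8 are assumptions taken from the in-kernel definitions, and sk_to_ratio() mirrors to_ratio(period, runtime): runtime/period expressed in 2^-BW_SHIFT units.

#include <stdint.h>
#include <stdio.h>

#define SK_BW_SHIFT	20	/* assumed to match BW_SHIFT */
#define SK_RATIO_SHIFT	 8	/* assumed to match RATIO_SHIFT */

/* runtime/period as a 2^-SK_BW_SHIFT fixed-point fraction */
static uint64_t sk_to_ratio(uint64_t period, uint64_t runtime)
{
	return period ? (runtime << SK_BW_SHIFT) / period : 0;
}

int main(void)
{
	uint64_t runtime = 950000ULL * 1000;	/* 950 ms in ns */
	uint64_t period  = 1000000ULL * 1000;	/* 1 s in ns */

	/* ~996147, i.e. 0.95: the maximum bandwidth GRUB may reclaim */
	uint64_t extra_bw = sk_to_ratio(period, runtime);

	/* ~269, i.e. 1/0.95 in 2^-8 units: scales reclaimed runtime back up */
	uint64_t bw_ratio = sk_to_ratio(runtime, period) >>
			    (SK_BW_SHIFT - SK_RATIO_SHIFT);

	printf("extra_bw=%llu bw_ratio=%llu\n",
	       (unsigned long long)extra_bw, (unsigned long long)bw_ratio);
	return 0;
}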
2514 : :
2515 : 0 : void sched_dl_do_global(void)
2516 : : {
2517 : : u64 new_bw = -1;
2518 : : struct dl_bw *dl_b;
2519 : : int cpu;
2520 : : unsigned long flags;
2521 : :
2522 : 0 : def_dl_bandwidth.dl_period = global_rt_period();
2523 : 0 : def_dl_bandwidth.dl_runtime = global_rt_runtime();
2524 : :
2525 : 0 : if (global_rt_runtime() != RUNTIME_INF)
2526 : 0 : new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2527 : :
2528 : : /*
2529 : : * FIXME: As above...
2530 : : */
2531 : 0 : for_each_possible_cpu(cpu) {
2532 : : rcu_read_lock_sched();
2533 : : dl_b = dl_bw_of(cpu);
2534 : :
2535 : 0 : raw_spin_lock_irqsave(&dl_b->lock, flags);
2536 : 0 : dl_b->bw = new_bw;
2537 : 0 : raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2538 : :
2539 : : rcu_read_unlock_sched();
2540 : 0 : init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2541 : : }
2542 : 0 : }
2543 : :
2544 : : /*
2545 : : * We must be sure that accepting a new task (or allowing changing the
2546 : : * parameters of an existing one) is consistent with the bandwidth
2547 : : * constraints. If so, this function also updates the currently allocated
2548 : : * bandwidth accordingly, to reflect the new situation.
2549 : : *
2550 : : * This function is called while holding p's rq->lock.
2551 : : */
2552 : 0 : int sched_dl_overflow(struct task_struct *p, int policy,
2553 : : const struct sched_attr *attr)
2554 : : {
2555 : 0 : struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2556 : 0 : u64 period = attr->sched_period ?: attr->sched_deadline;
2557 : 0 : u64 runtime = attr->sched_runtime;
2558 : 0 : u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2559 : : int cpus, err = -1;
2560 : :
2561 : 0 : if (attr->sched_flags & SCHED_FLAG_SUGOV)
2562 : : return 0;
2563 : :
2564 : : /* !deadline task may carry old deadline bandwidth */
2565 : 0 : if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2566 : : return 0;
2567 : :
2568 : : /*
2569 : : * Whether a task enters, leaves, or stays -deadline but changes
2570 : : * its parameters, we may need to update the total allocated
2571 : : * bandwidth of the container accordingly.
2572 : : */
2573 : 0 : raw_spin_lock(&dl_b->lock);
2574 : 0 : cpus = dl_bw_cpus(task_cpu(p));
2575 : 0 : if (dl_policy(policy) && !task_has_dl_policy(p) &&
2576 : : !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2577 : 0 : if (hrtimer_active(&p->dl.inactive_timer))
2578 : 0 : __dl_sub(dl_b, p->dl.dl_bw, cpus);
2579 : : __dl_add(dl_b, new_bw, cpus);
2580 : 0 : err = 0;
2581 : 0 : } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2582 : 0 : !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2583 : : /*
2584 : : * XXX this is slightly incorrect: when the task
2585 : : * utilization decreases, we should delay the total
2586 : : * utilization change until the task's 0-lag point.
2587 : : * But this would require setting the task's "inactive
2588 : : * timer" when the task is not inactive.
2589 : : */
2590 : : __dl_sub(dl_b, p->dl.dl_bw, cpus);
2591 : : __dl_add(dl_b, new_bw, cpus);
2592 : 0 : dl_change_utilization(p, new_bw);
2593 : 0 : err = 0;
2594 : 0 : } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2595 : : /*
2596 : : * Do not decrease the total deadline utilization here,
2597 : : * switched_from_dl() will take care to do it at the correct
2598 : : * (0-lag) time.
2599 : : */
2600 : : err = 0;
2601 : : }
2602 : : raw_spin_unlock(&dl_b->lock);
2603 : :
2604 : 0 : return err;
2605 : : }
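The admission test applied above via __dl_overflow() accepts a change only if the root domain's total utilization, with the task's old bandwidth swapped for its new one, still fits within cpus * bw. Below is a standalone sketch of that check with illustrative numbers; the 2^-20 fixed-point encoding matches dl_bw, and the sk_dl_overflow() name is made up for the example.

#include <stdbool.h>
#include <stdint.h>

/*
 * cap_bw == -1 means "no global limit" (RUNTIME_INF); otherwise it is
 * the per-CPU cap in 2^-20 fixed-point units (0.95 by default).
 */
static bool sk_dl_overflow(int64_t cap_bw, int cpus, uint64_t total_bw,
			   uint64_t old_bw, uint64_t new_bw)
{
	return cap_bw != -1 &&
	       (uint64_t)cap_bw * cpus < total_bw - old_bw + new_bw;
}

/*
 * Example: four CPUs with a 0.95 cap (996147) leave room for ~3.8 worth
 * of deadline utilization. Four tasks of utilization 0.8 (838861) fit,
 * but admitting a fifth overflows, so sched_setattr() returns -EBUSY:
 *
 *   sk_dl_overflow(996147, 4, 4 * 838861, 0, 838861) == true
 */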
2606 : :
2607 : : /*
2608 : : * This function initializes the sched_dl_entity of a task that is
2609 : : * becoming SCHED_DEADLINE.
2610 : : *
2611 : : * Only the static values are considered here, the actual runtime and the
2612 : : * absolute deadline will be properly calculated when the task is enqueued
2613 : : * for the first time with its new policy.
2614 : : */
2615 : 0 : void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2616 : : {
2617 : : struct sched_dl_entity *dl_se = &p->dl;
2618 : :
2619 : 0 : dl_se->dl_runtime = attr->sched_runtime;
2620 : 0 : dl_se->dl_deadline = attr->sched_deadline;
2621 : 0 : dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2622 : 0 : dl_se->flags = attr->sched_flags;
2623 : 0 : dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2624 : 0 : dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2625 : 0 : }
2626 : :
2627 : 0 : void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2628 : : {
2629 : : struct sched_dl_entity *dl_se = &p->dl;
2630 : :
2631 : 0 : attr->sched_priority = p->rt_priority;
2632 : 0 : attr->sched_runtime = dl_se->dl_runtime;
2633 : 0 : attr->sched_deadline = dl_se->dl_deadline;
2634 : 0 : attr->sched_period = dl_se->dl_period;
2635 : 0 : attr->sched_flags = dl_se->flags;
2636 : 0 : }
2637 : :
2638 : : /*
2639 : : * This function validates the new parameters of a -deadline task.
2640 : : * We require the deadline to be non-zero and greater than or equal
2641 : : * to the runtime, and the period to be either zero or greater than
2642 : : * or equal to the deadline. Furthermore, we have to be sure that
2643 : : * user parameters are above the internal resolution of 1us (we
2644 : : * check sched_runtime only since it is always the smaller one) and
2645 : : * below 2^63 ns (we have to check both sched_deadline and
2646 : : * sched_period, as the latter can be zero).
2647 : : */
2648 : 0 : bool __checkparam_dl(const struct sched_attr *attr)
2649 : : {
2650 : : /* special dl tasks don't actually use any parameter */
2651 : 0 : if (attr->sched_flags & SCHED_FLAG_SUGOV)
2652 : : return true;
2653 : :
2654 : : /* deadline != 0 */
2655 : 0 : if (attr->sched_deadline == 0)
2656 : : return false;
2657 : :
2658 : : /*
2659 : : * Since we truncate DL_SCALE bits, make sure we're at least
2660 : : * that big.
2661 : : */
2662 : 0 : if (attr->sched_runtime < (1ULL << DL_SCALE))
2663 : : return false;
2664 : :
2665 : : /*
2666 : : * Since we use the MSB for wrap-around and sign issues, make
2667 : : * sure it's not set (mind that period can be equal to zero).
2668 : : */
2669 : 0 : if (attr->sched_deadline & (1ULL << 63) ||
2670 : 0 : attr->sched_period & (1ULL << 63))
2671 : : return false;
2672 : :
2673 : : /* runtime <= deadline <= period (if period != 0) */
2674 : 0 : if ((attr->sched_period != 0 &&
2675 : 0 : attr->sched_period < attr->sched_deadline) ||
2676 : : attr->sched_deadline < attr->sched_runtime)
2677 : : return false;
2678 : :
2679 : 0 : return true;
2680 : : }
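The same rules can be checked from userspace before calling sched_setattr(); the sketch below simply mirrors the tests above. The DL_SCALE value of 10 (roughly a 1us floor on sched_runtime) is an assumption taken from the in-kernel definition, and the SCHED_FLAG_SUGOV special case is omitted.

#include <stdbool.h>
#include <stdint.h>

#define SK_DL_SCALE 10	/* assumed to match DL_SCALE */

static bool sk_checkparam_dl(uint64_t runtime, uint64_t deadline,
			     uint64_t period)
{
	if (deadline == 0)
		return false;		/* a deadline must be given */
	if (runtime < (1ULL << SK_DL_SCALE))
		return false;		/* below the ~1us resolution */
	if ((deadline | period) & (1ULL << 63))
		return false;		/* MSB reserved for wrap-around */
	if ((period != 0 && period < deadline) || deadline < runtime)
		return false;		/* need runtime <= deadline <= period */
	return true;
}

/*
 * sk_checkparam_dl(10000000, 100000000, 0) is true: a zero period is
 * accepted here and later defaults to the deadline in __setparam_dl().
 */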
2681 : :
2682 : : /*
2683 : : * This function clears the sched_dl_entity static params.
2684 : : */
2685 : 3 : void __dl_clear_params(struct task_struct *p)
2686 : : {
2687 : : struct sched_dl_entity *dl_se = &p->dl;
2688 : :
2689 : 3 : dl_se->dl_runtime = 0;
2690 : 3 : dl_se->dl_deadline = 0;
2691 : 3 : dl_se->dl_period = 0;
2692 : 3 : dl_se->flags = 0;
2693 : 3 : dl_se->dl_bw = 0;
2694 : 3 : dl_se->dl_density = 0;
2695 : :
2696 : 3 : dl_se->dl_boosted = 0;
2697 : 3 : dl_se->dl_throttled = 0;
2698 : 3 : dl_se->dl_yielded = 0;
2699 : 3 : dl_se->dl_non_contending = 0;
2700 : 3 : dl_se->dl_overrun = 0;
2701 : 3 : }
2702 : :
2703 : 0 : bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2704 : : {
2705 : : struct sched_dl_entity *dl_se = &p->dl;
2706 : :
2707 : 0 : if (dl_se->dl_runtime != attr->sched_runtime ||
2708 : 0 : dl_se->dl_deadline != attr->sched_deadline ||
2709 : 0 : dl_se->dl_period != attr->sched_period ||
2710 : 0 : dl_se->flags != attr->sched_flags)
2711 : : return true;
2712 : :
2713 : 0 : return false;
2714 : : }
2715 : :
2716 : : #ifdef CONFIG_SMP
2717 : 0 : int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2718 : : {
2719 : : unsigned int dest_cpu;
2720 : : struct dl_bw *dl_b;
2721 : : bool overflow;
2722 : : int cpus, ret;
2723 : : unsigned long flags;
2724 : :
2725 : 0 : dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
2726 : :
2727 : : rcu_read_lock_sched();
2728 : : dl_b = dl_bw_of(dest_cpu);
2729 : 0 : raw_spin_lock_irqsave(&dl_b->lock, flags);
2730 : 0 : cpus = dl_bw_cpus(dest_cpu);
2731 : 0 : overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
2732 : 0 : if (overflow) {
2733 : : ret = -EBUSY;
2734 : : } else {
2735 : : /*
2736 : : * We reserve space for this task in the destination
2737 : : * root_domain, as we can't fail after this point.
2738 : : * We will free resources in the source root_domain
2739 : : * later on (see set_cpus_allowed_dl()).
2740 : : */
2741 : : __dl_add(dl_b, p->dl.dl_bw, cpus);
2742 : : ret = 0;
2743 : : }
2744 : 0 : raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2745 : : rcu_read_unlock_sched();
2746 : :
2747 : 0 : return ret;
2748 : : }
2749 : :
2750 : 0 : int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2751 : : const struct cpumask *trial)
2752 : : {
2753 : : int ret = 1, trial_cpus;
2754 : : struct dl_bw *cur_dl_b;
2755 : : unsigned long flags;
2756 : :
2757 : : rcu_read_lock_sched();
2758 : : cur_dl_b = dl_bw_of(cpumask_any(cur));
2759 : 0 : trial_cpus = cpumask_weight(trial);
2760 : :
2761 : 0 : raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2762 : 0 : if (cur_dl_b->bw != -1 &&
2763 : 0 : cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2764 : : ret = 0;
2765 : 0 : raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2766 : : rcu_read_unlock_sched();
2767 : :
2768 : 0 : return ret;
2769 : : }
2770 : :
2771 : 0 : bool dl_cpu_busy(unsigned int cpu)
2772 : : {
2773 : : unsigned long flags;
2774 : : struct dl_bw *dl_b;
2775 : : bool overflow;
2776 : : int cpus;
2777 : :
2778 : : rcu_read_lock_sched();
2779 : 0 : dl_b = dl_bw_of(cpu);
2780 : 0 : raw_spin_lock_irqsave(&dl_b->lock, flags);
2781 : 0 : cpus = dl_bw_cpus(cpu);
2782 : : overflow = __dl_overflow(dl_b, cpus, 0, 0);
2783 : 0 : raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2784 : : rcu_read_unlock_sched();
2785 : :
2786 : 0 : return overflow;
2787 : : }
2788 : : #endif
2789 : :
2790 : : #ifdef CONFIG_SCHED_DEBUG
2791 : 0 : void print_dl_stats(struct seq_file *m, int cpu)
2792 : : {
2793 : 0 : print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2794 : 0 : }
2795 : : #endif /* CONFIG_SCHED_DEBUG */
|