TOMOYO Linux Cross Reference
Linux/kernel/sched/deadline.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Deadline Scheduling Class (SCHED_DEADLINE)
  4  *
  5  * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
  6  *
  7  * Tasks that periodically execute their instances for less than their
  8  * runtime won't miss any of their deadlines.
  9  * Tasks that are not periodic or sporadic, or that try to execute more
 10  * than their reserved bandwidth will be slowed down (and may potentially
 11  * miss some of their deadlines), and won't affect any other task.
 12  *
 13  * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 14  *                    Juri Lelli <juri.lelli@gmail.com>,
 15  *                    Michael Trimarchi <michael@amarulasolutions.com>,
 16  *                    Fabio Checconi <fchecconi@gmail.com>
 17  */
 18 
 19 #include <linux/cpuset.h>
 20 
 21 /*
 22  * Default limits for DL period; on the top end we guard against small util
 23  * tasks still getting ridiculously long effective runtimes, on the bottom end we
 24  * guard against timer DoS.
 25  */
 26 static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
 27 static unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
 28 #ifdef CONFIG_SYSCTL
 29 static struct ctl_table sched_dl_sysctls[] = {
 30         {
 31                 .procname       = "sched_deadline_period_max_us",
 32                 .data           = &sysctl_sched_dl_period_max,
 33                 .maxlen         = sizeof(unsigned int),
 34                 .mode           = 0644,
 35                 .proc_handler   = proc_douintvec_minmax,
 36                 .extra1         = (void *)&sysctl_sched_dl_period_min,
 37         },
 38         {
 39                 .procname       = "sched_deadline_period_min_us",
 40                 .data           = &sysctl_sched_dl_period_min,
 41                 .maxlen         = sizeof(unsigned int),
 42                 .mode           = 0644,
 43                 .proc_handler   = proc_douintvec_minmax,
 44                 .extra2         = (void *)&sysctl_sched_dl_period_max,
 45         },
 46 };
 47 
 48 static int __init sched_dl_sysctl_init(void)
 49 {
 50         register_sysctl_init("kernel", sched_dl_sysctls);
 51         return 0;
 52 }
 53 late_initcall(sched_dl_sysctl_init);
 54 #endif
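
/*
 * Example: the two limits above are exposed, in microseconds, under
 * /proc/sys/kernel/, e.g.:
 *
 *   # sysctl kernel.sched_deadline_period_max_us      (default 1 << 22, ~4.2s)
 *   # sysctl -w kernel.sched_deadline_period_min_us=100
 *
 * The extra1/extra2 pointers cross-reference the other limit, so the
 * maximum can never be written below the minimum and vice versa.
 */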
 55 
 56 static bool dl_server(struct sched_dl_entity *dl_se)
 57 {
 58         return dl_se->dl_server;
 59 }
 60 
 61 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
 62 {
 63         BUG_ON(dl_server(dl_se));
 64         return container_of(dl_se, struct task_struct, dl);
 65 }
 66 
 67 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
 68 {
 69         return container_of(dl_rq, struct rq, dl);
 70 }
 71 
 72 static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
 73 {
 74         struct rq *rq = dl_se->rq;
 75 
 76         if (!dl_server(dl_se))
 77                 rq = task_rq(dl_task_of(dl_se));
 78 
 79         return rq;
 80 }
 81 
 82 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
 83 {
 84         return &rq_of_dl_se(dl_se)->dl;
 85 }
 86 
 87 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
 88 {
 89         return !RB_EMPTY_NODE(&dl_se->rb_node);
 90 }
 91 
 92 #ifdef CONFIG_RT_MUTEXES
 93 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
 94 {
 95         return dl_se->pi_se;
 96 }
 97 
 98 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
 99 {
100         return pi_of(dl_se) != dl_se;
101 }
102 #else
103 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
104 {
105         return dl_se;
106 }
107 
108 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
109 {
110         return false;
111 }
112 #endif
113 
114 #ifdef CONFIG_SMP
115 static inline struct dl_bw *dl_bw_of(int i)
116 {
117         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
118                          "sched RCU must be held");
119         return &cpu_rq(i)->rd->dl_bw;
120 }
121 
122 static inline int dl_bw_cpus(int i)
123 {
124         struct root_domain *rd = cpu_rq(i)->rd;
125         int cpus;
126 
127         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
128                          "sched RCU must be held");
129 
130         if (cpumask_subset(rd->span, cpu_active_mask))
131                 return cpumask_weight(rd->span);
132 
133         cpus = 0;
134 
135         for_each_cpu_and(i, rd->span, cpu_active_mask)
136                 cpus++;
137 
138         return cpus;
139 }
140 
141 static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
142 {
143         unsigned long cap = 0;
144         int i;
145 
146         for_each_cpu_and(i, mask, cpu_active_mask)
147                 cap += arch_scale_cpu_capacity(i);
148 
149         return cap;
150 }
151 
152 /*
153  * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
 154  * of the CPU the task is running on rather than rd's \Sum CPU capacity.
155  */
156 static inline unsigned long dl_bw_capacity(int i)
157 {
158         if (!sched_asym_cpucap_active() &&
159             arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) {
160                 return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
161         } else {
162                 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
163                                  "sched RCU must be held");
164 
165                 return __dl_bw_capacity(cpu_rq(i)->rd->span);
166         }
167 }
168 
169 static inline bool dl_bw_visited(int cpu, u64 gen)
170 {
171         struct root_domain *rd = cpu_rq(cpu)->rd;
172 
173         if (rd->visit_gen == gen)
174                 return true;
175 
176         rd->visit_gen = gen;
177         return false;
178 }
179 
180 static inline
181 void __dl_update(struct dl_bw *dl_b, s64 bw)
182 {
183         struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
184         int i;
185 
186         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
187                          "sched RCU must be held");
188         for_each_cpu_and(i, rd->span, cpu_active_mask) {
189                 struct rq *rq = cpu_rq(i);
190 
191                 rq->dl.extra_bw += bw;
192         }
193 }
194 #else
195 static inline struct dl_bw *dl_bw_of(int i)
196 {
197         return &cpu_rq(i)->dl.dl_bw;
198 }
199 
200 static inline int dl_bw_cpus(int i)
201 {
202         return 1;
203 }
204 
205 static inline unsigned long dl_bw_capacity(int i)
206 {
207         return SCHED_CAPACITY_SCALE;
208 }
209 
210 static inline bool dl_bw_visited(int cpu, u64 gen)
211 {
212         return false;
213 }
214 
215 static inline
216 void __dl_update(struct dl_bw *dl_b, s64 bw)
217 {
218         struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
219 
220         dl->extra_bw += bw;
221 }
222 #endif
223 
224 static inline
225 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
226 {
227         dl_b->total_bw -= tsk_bw;
228         __dl_update(dl_b, (s32)tsk_bw / cpus);
229 }
230 
231 static inline
232 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
233 {
234         dl_b->total_bw += tsk_bw;
235         __dl_update(dl_b, -((s32)tsk_bw / cpus));
236 }
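
/*
 * Worked example (illustrative numbers): admitting a task with dl_bw = 0.3
 * on a root domain spanning 4 CPUs adds 0.3 to total_bw and, via
 * __dl_update(), removes 0.3 / 4 = 0.075 of extra (reclaimable) bandwidth
 * from each of the 4 runqueues; __dl_sub() does the inverse on removal.
 */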
237 
238 static inline bool
239 __dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
240 {
241         return dl_b->bw != -1 &&
242                cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
243 }
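
/*
 * Worked example (illustrative numbers, bandwidths written as plain
 * fractions): with the default 95% limit (dl_b->bw = 0.95), two
 * full-capacity CPUs (cap = 2 << SCHED_CAPACITY_SHIFT) and total_bw = 1.5,
 * a new task with new_bw = 0.4 is admitted because
 *
 *   cap_scale(0.95, 2 * SCHED_CAPACITY_SCALE) = 1.90 >= 1.5 - 0 + 0.4
 *
 * whereas new_bw = 0.5 would make the right-hand side 2.0 and overflow.
 */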
244 
245 static inline
246 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
247 {
248         u64 old = dl_rq->running_bw;
249 
250         lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
251         dl_rq->running_bw += dl_bw;
252         SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
253         SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
254         /* kick cpufreq (see the comment in kernel/sched/sched.h). */
255         cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
256 }
257 
258 static inline
259 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
260 {
261         u64 old = dl_rq->running_bw;
262 
263         lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
264         dl_rq->running_bw -= dl_bw;
265         SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
266         if (dl_rq->running_bw > old)
267                 dl_rq->running_bw = 0;
268         /* kick cpufreq (see the comment in kernel/sched/sched.h). */
269         cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
270 }
271 
272 static inline
273 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
274 {
275         u64 old = dl_rq->this_bw;
276 
277         lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
278         dl_rq->this_bw += dl_bw;
279         SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
280 }
281 
282 static inline
283 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
284 {
285         u64 old = dl_rq->this_bw;
286 
287         lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
288         dl_rq->this_bw -= dl_bw;
289         SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
290         if (dl_rq->this_bw > old)
291                 dl_rq->this_bw = 0;
292         SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
293 }
294 
295 static inline
296 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
297 {
298         if (!dl_entity_is_special(dl_se))
299                 __add_rq_bw(dl_se->dl_bw, dl_rq);
300 }
301 
302 static inline
303 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
304 {
305         if (!dl_entity_is_special(dl_se))
306                 __sub_rq_bw(dl_se->dl_bw, dl_rq);
307 }
308 
309 static inline
310 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
311 {
312         if (!dl_entity_is_special(dl_se))
313                 __add_running_bw(dl_se->dl_bw, dl_rq);
314 }
315 
316 static inline
317 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
318 {
319         if (!dl_entity_is_special(dl_se))
320                 __sub_running_bw(dl_se->dl_bw, dl_rq);
321 }
322 
323 static void dl_change_utilization(struct task_struct *p, u64 new_bw)
324 {
325         struct rq *rq;
326 
327         WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
328 
329         if (task_on_rq_queued(p))
330                 return;
331 
332         rq = task_rq(p);
333         if (p->dl.dl_non_contending) {
334                 sub_running_bw(&p->dl, &rq->dl);
335                 p->dl.dl_non_contending = 0;
336                 /*
337                  * If the timer handler is currently running and the
338                  * timer cannot be canceled, inactive_task_timer()
 339                  * will see that dl_non_contending is not set, and
340                  * will not touch the rq's active utilization,
341                  * so we are still safe.
342                  */
343                 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
344                         put_task_struct(p);
345         }
346         __sub_rq_bw(p->dl.dl_bw, &rq->dl);
347         __add_rq_bw(new_bw, &rq->dl);
348 }
349 
350 static void __dl_clear_params(struct sched_dl_entity *dl_se);
351 
352 /*
353  * The utilization of a task cannot be immediately removed from
354  * the rq active utilization (running_bw) when the task blocks.
 355  * Instead, we have to wait for the so-called "0-lag time".
356  *
357  * If a task blocks before the "0-lag time", a timer (the inactive
358  * timer) is armed, and running_bw is decreased when the timer
359  * fires.
360  *
361  * If the task wakes up again before the inactive timer fires,
362  * the timer is canceled, whereas if the task wakes up after the
363  * inactive timer fired (and running_bw has been decreased) the
364  * task's utilization has to be added to running_bw again.
365  * A flag in the deadline scheduling entity (dl_non_contending)
366  * is used to avoid race conditions between the inactive timer handler
367  * and task wakeups.
368  *
369  * The following diagram shows how running_bw is updated. A task is
370  * "ACTIVE" when its utilization contributes to running_bw; an
371  * "ACTIVE contending" task is in the TASK_RUNNING state, while an
372  * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
373  * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
374  * time already passed, which does not contribute to running_bw anymore.
375  *                              +------------------+
376  *             wakeup           |    ACTIVE        |
377  *          +------------------>+   contending     |
378  *          | add_running_bw    |                  |
379  *          |                   +----+------+------+
380  *          |                        |      ^
381  *          |                dequeue |      |
382  * +--------+-------+                |      |
383  * |                |   t >= 0-lag   |      | wakeup
384  * |    INACTIVE    |<---------------+      |
385  * |                | sub_running_bw |      |
386  * +--------+-------+                |      |
387  *          ^                        |      |
388  *          |              t < 0-lag |      |
389  *          |                        |      |
390  *          |                        V      |
391  *          |                   +----+------+------+
392  *          | sub_running_bw    |    ACTIVE        |
393  *          +-------------------+                  |
394  *            inactive timer    |  non contending  |
395  *            fired             +------------------+
396  *
397  * The task_non_contending() function is invoked when a task
398  * blocks, and checks if the 0-lag time already passed or
399  * not (in the first case, it directly updates running_bw;
400  * in the second case, it arms the inactive timer).
401  *
402  * The task_contending() function is invoked when a task wakes
403  * up, and checks if the task is still in the "ACTIVE non contending"
404  * state or not (in the second case, it updates running_bw).
405  */
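/*
 * Worked example of the "0-lag time" (illustrative numbers): a task with
 * dl_runtime = 10ms and dl_period = 100ms has 2ms of runtime left and an
 * absolute deadline at t = 150ms.  Its 0-lag time is
 *
 *   deadline - runtime * dl_period / dl_runtime = 150 - 2 * 100 / 10 = 130ms.
 *
 * Blocking at t = 135ms decreases running_bw immediately (the 0-lag time
 * has already passed), while blocking at t = 120ms arms the inactive timer
 * to fire 10ms later.
 */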
406 static void task_non_contending(struct sched_dl_entity *dl_se)
407 {
408         struct hrtimer *timer = &dl_se->inactive_timer;
409         struct rq *rq = rq_of_dl_se(dl_se);
410         struct dl_rq *dl_rq = &rq->dl;
411         s64 zerolag_time;
412 
413         /*
414          * If this is a non-deadline task that has been boosted,
415          * do nothing
416          */
417         if (dl_se->dl_runtime == 0)
418                 return;
419 
420         if (dl_entity_is_special(dl_se))
421                 return;
422 
423         WARN_ON(dl_se->dl_non_contending);
424 
425         zerolag_time = dl_se->deadline -
426                  div64_long((dl_se->runtime * dl_se->dl_period),
427                         dl_se->dl_runtime);
428 
429         /*
430          * Using relative times instead of the absolute "0-lag time"
 431          * allows us to simplify the code
432          */
433         zerolag_time -= rq_clock(rq);
434 
435         /*
436          * If the "0-lag time" already passed, decrease the active
437          * utilization now, instead of starting a timer
438          */
439         if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
440                 if (dl_server(dl_se)) {
441                         sub_running_bw(dl_se, dl_rq);
442                 } else {
443                         struct task_struct *p = dl_task_of(dl_se);
444 
445                         if (dl_task(p))
446                                 sub_running_bw(dl_se, dl_rq);
447 
448                         if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
449                                 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
450 
451                                 if (READ_ONCE(p->__state) == TASK_DEAD)
452                                         sub_rq_bw(dl_se, &rq->dl);
453                                 raw_spin_lock(&dl_b->lock);
454                                 __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
455                                 raw_spin_unlock(&dl_b->lock);
456                                 __dl_clear_params(dl_se);
457                         }
458                 }
459 
460                 return;
461         }
462 
463         dl_se->dl_non_contending = 1;
464         if (!dl_server(dl_se))
465                 get_task_struct(dl_task_of(dl_se));
466 
467         hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
468 }
469 
470 static void task_contending(struct sched_dl_entity *dl_se, int flags)
471 {
472         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
473 
474         /*
475          * If this is a non-deadline task that has been boosted,
476          * do nothing
477          */
478         if (dl_se->dl_runtime == 0)
479                 return;
480 
481         if (flags & ENQUEUE_MIGRATED)
482                 add_rq_bw(dl_se, dl_rq);
483 
484         if (dl_se->dl_non_contending) {
485                 dl_se->dl_non_contending = 0;
486                 /*
487                  * If the timer handler is currently running and the
488                  * timer cannot be canceled, inactive_task_timer()
 489                  * will see that dl_non_contending is not set, and
490                  * will not touch the rq's active utilization,
491                  * so we are still safe.
492                  */
493                 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
494                         if (!dl_server(dl_se))
495                                 put_task_struct(dl_task_of(dl_se));
496                 }
497         } else {
498                 /*
499                  * Since "dl_non_contending" is not set, the
500                  * task's utilization has already been removed from
501                  * active utilization (either when the task blocked,
 502                  * or when the "inactive timer" fired).
503                  * So, add it back.
504                  */
505                 add_running_bw(dl_se, dl_rq);
506         }
507 }
508 
509 static inline int is_leftmost(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
510 {
511         return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
512 }
513 
514 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
515 
516 void init_dl_bw(struct dl_bw *dl_b)
517 {
518         raw_spin_lock_init(&dl_b->lock);
519         if (global_rt_runtime() == RUNTIME_INF)
520                 dl_b->bw = -1;
521         else
522                 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
523         dl_b->total_bw = 0;
524 }
525 
526 void init_dl_rq(struct dl_rq *dl_rq)
527 {
528         dl_rq->root = RB_ROOT_CACHED;
529 
530 #ifdef CONFIG_SMP
531         /* zero means no -deadline tasks */
532         dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
533 
534         dl_rq->overloaded = 0;
535         dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
536 #else
537         init_dl_bw(&dl_rq->dl_bw);
538 #endif
539 
540         dl_rq->running_bw = 0;
541         dl_rq->this_bw = 0;
542         init_dl_rq_bw_ratio(dl_rq);
543 }
544 
545 #ifdef CONFIG_SMP
546 
547 static inline int dl_overloaded(struct rq *rq)
548 {
549         return atomic_read(&rq->rd->dlo_count);
550 }
551 
552 static inline void dl_set_overload(struct rq *rq)
553 {
554         if (!rq->online)
555                 return;
556 
557         cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
558         /*
559          * Must be visible before the overload count is
560          * set (as in sched_rt.c).
561          *
562          * Matched by the barrier in pull_dl_task().
563          */
564         smp_wmb();
565         atomic_inc(&rq->rd->dlo_count);
566 }
567 
568 static inline void dl_clear_overload(struct rq *rq)
569 {
570         if (!rq->online)
571                 return;
572 
573         atomic_dec(&rq->rd->dlo_count);
574         cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
575 }
576 
577 #define __node_2_pdl(node) \
578         rb_entry((node), struct task_struct, pushable_dl_tasks)
579 
580 static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
581 {
582         return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
583 }
584 
585 static inline int has_pushable_dl_tasks(struct rq *rq)
586 {
587         return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
588 }
589 
590 /*
 591  * Unlike in sched_rt.c, the list of pushable -deadline tasks is not a
 592  * plist; it is an rb-tree with tasks ordered by deadline.
593  */
594 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
595 {
596         struct rb_node *leftmost;
597 
598         WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
599 
600         leftmost = rb_add_cached(&p->pushable_dl_tasks,
601                                  &rq->dl.pushable_dl_tasks_root,
602                                  __pushable_less);
603         if (leftmost)
604                 rq->dl.earliest_dl.next = p->dl.deadline;
605 
606         if (!rq->dl.overloaded) {
607                 dl_set_overload(rq);
608                 rq->dl.overloaded = 1;
609         }
610 }
611 
612 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
613 {
614         struct dl_rq *dl_rq = &rq->dl;
615         struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
616         struct rb_node *leftmost;
617 
618         if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
619                 return;
620 
621         leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
622         if (leftmost)
623                 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
624 
625         RB_CLEAR_NODE(&p->pushable_dl_tasks);
626 
627         if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) {
628                 dl_clear_overload(rq);
629                 rq->dl.overloaded = 0;
630         }
631 }
632 
633 static int push_dl_task(struct rq *rq);
634 
635 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
636 {
637         return rq->online && dl_task(prev);
638 }
639 
640 static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
641 static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);
642 
643 static void push_dl_tasks(struct rq *);
644 static void pull_dl_task(struct rq *);
645 
646 static inline void deadline_queue_push_tasks(struct rq *rq)
647 {
648         if (!has_pushable_dl_tasks(rq))
649                 return;
650 
651         queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
652 }
653 
654 static inline void deadline_queue_pull_task(struct rq *rq)
655 {
656         queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
657 }
658 
659 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
660 
661 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
662 {
663         struct rq *later_rq = NULL;
664         struct dl_bw *dl_b;
665 
666         later_rq = find_lock_later_rq(p, rq);
667         if (!later_rq) {
668                 int cpu;
669 
670                 /*
671                  * If we cannot preempt any rq, fall back to pick any
672                  * online CPU:
673                  */
674                 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
675                 if (cpu >= nr_cpu_ids) {
676                         /*
677                          * Failed to find any suitable CPU.
678                          * The task will never come back!
679                          */
680                         WARN_ON_ONCE(dl_bandwidth_enabled());
681 
682                         /*
683                          * If admission control is disabled we
684                          * try a little harder to let the task
685                          * run.
686                          */
687                         cpu = cpumask_any(cpu_active_mask);
688                 }
689                 later_rq = cpu_rq(cpu);
690                 double_lock_balance(rq, later_rq);
691         }
692 
693         if (p->dl.dl_non_contending || p->dl.dl_throttled) {
694                 /*
695                  * Inactive timer is armed (or callback is running, but
696                  * waiting for us to release rq locks). In any case, when it
 697                  * fires (or continues), it will see the running_bw of this
698                  * task migrated to later_rq (and correctly handle it).
699                  */
700                 sub_running_bw(&p->dl, &rq->dl);
701                 sub_rq_bw(&p->dl, &rq->dl);
702 
703                 add_rq_bw(&p->dl, &later_rq->dl);
704                 add_running_bw(&p->dl, &later_rq->dl);
705         } else {
706                 sub_rq_bw(&p->dl, &rq->dl);
707                 add_rq_bw(&p->dl, &later_rq->dl);
708         }
709 
710         /*
711          * And we finally need to fix up root_domain(s) bandwidth accounting,
712          * since p is still hanging out in the old (now moved to default) root
713          * domain.
714          */
715         dl_b = &rq->rd->dl_bw;
716         raw_spin_lock(&dl_b->lock);
717         __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
718         raw_spin_unlock(&dl_b->lock);
719 
720         dl_b = &later_rq->rd->dl_bw;
721         raw_spin_lock(&dl_b->lock);
722         __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
723         raw_spin_unlock(&dl_b->lock);
724 
725         set_task_cpu(p, later_rq->cpu);
726         double_unlock_balance(later_rq, rq);
727 
728         return later_rq;
729 }
730 
731 #else
732 
733 static inline
734 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
735 {
736 }
737 
738 static inline
739 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
740 {
741 }
742 
743 static inline
744 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
745 {
746 }
747 
748 static inline
749 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
750 {
751 }
752 
753 static inline void deadline_queue_push_tasks(struct rq *rq)
754 {
755 }
756 
757 static inline void deadline_queue_pull_task(struct rq *rq)
758 {
759 }
760 #endif /* CONFIG_SMP */
761 
762 static void
763 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags);
764 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
765 static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags);
766 static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);
767 
768 static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
769                                             struct rq *rq)
770 {
771         /* for non-boosted task, pi_of(dl_se) == dl_se */
772         dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
773         dl_se->runtime = pi_of(dl_se)->dl_runtime;
774 }
775 
776 /*
777  * We are being explicitly informed that a new instance is starting,
778  * and this means that:
779  *  - the absolute deadline of the entity has to be placed at
780  *    current time + relative deadline;
781  *  - the runtime of the entity has to be set to the maximum value.
782  *
 783  * The capability of specifying such an event is useful whenever a -deadline
 784  * entity wants to (try to!) synchronize its behaviour with the scheduler's,
 785  * and to (try to!) reconcile itself with its own scheduling
786  * parameters.
787  */
788 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
789 {
790         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
791         struct rq *rq = rq_of_dl_rq(dl_rq);
792 
793         WARN_ON(is_dl_boosted(dl_se));
794         WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
795 
796         /*
797          * We are racing with the deadline timer. So, do nothing because
798          * the deadline timer handler will take care of properly recharging
799          * the runtime and postponing the deadline
800          */
801         if (dl_se->dl_throttled)
802                 return;
803 
804         /*
805          * We use the regular wall clock time to set deadlines in the
806          * future; in fact, we must consider execution overheads (time
807          * spent on hardirq context, etc.).
808          */
809         replenish_dl_new_period(dl_se, rq);
810 }
811 
812 /*
813  * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 814  * possibility of an entity lasting more than what it declared, and thus
815  * exhausting its runtime.
816  *
817  * Here we are interested in making runtime overrun possible, but we do
 818  * not want an entity that is misbehaving to affect the scheduling of all
819  * other entities.
820  * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
821  * is used, in order to confine each entity within its own bandwidth.
822  *
823  * This function deals exactly with that, and ensures that when the runtime
 824  * of an entity is replenished, its deadline is also postponed. That ensures
 825  * the overrunning entity can't interfere with other entities in the system and
 826  * can't make them miss their deadlines. Reasons why this kind of overrun
 827  * could happen are, typically, an entity voluntarily trying to exceed its
 828  * runtime, or having underestimated it during sched_setattr().
829  */
830 static void replenish_dl_entity(struct sched_dl_entity *dl_se)
831 {
832         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
833         struct rq *rq = rq_of_dl_rq(dl_rq);
834 
835         WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);
836 
837         /*
838          * This could be the case for a !-dl task that is boosted.
839          * Just go with full inherited parameters.
840          */
841         if (dl_se->dl_deadline == 0)
842                 replenish_dl_new_period(dl_se, rq);
843 
844         if (dl_se->dl_yielded && dl_se->runtime > 0)
845                 dl_se->runtime = 0;
846 
847         /*
848          * We keep moving the deadline away until we get some
849          * available runtime for the entity. This ensures correct
850          * handling of situations where the runtime overrun is
 851  * arbitrarily large.
852          */
853         while (dl_se->runtime <= 0) {
854                 dl_se->deadline += pi_of(dl_se)->dl_period;
855                 dl_se->runtime += pi_of(dl_se)->dl_runtime;
856         }
857 
858         /*
859          * At this point, the deadline really should be "in
860          * the future" with respect to rq->clock. If it's
861          * not, we are, for some reason, lagging too much!
 862          * Anyway, after having warned userspace about that,
 863          * we still try to keep things running by
864          * resetting the deadline and the budget of the
865          * entity.
866          */
867         if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
868                 printk_deferred_once("sched: DL replenish lagged too much\n");
869                 replenish_dl_new_period(dl_se, rq);
870         }
871 
872         if (dl_se->dl_yielded)
873                 dl_se->dl_yielded = 0;
874         if (dl_se->dl_throttled)
875                 dl_se->dl_throttled = 0;
876 }
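
/*
 * Worked example of the replenishment loop above (illustrative numbers):
 * with dl_runtime = 10ms and dl_period = 100ms, an entity that ended up at
 * runtime = -15ms takes two iterations:
 *
 *   deadline += 100ms, runtime = -15ms + 10ms = -5ms
 *   deadline += 100ms, runtime =  -5ms + 10ms = +5ms
 *
 * An arbitrarily large overrun is thus paid for by pushing the deadline
 * (and hence the EDF priority) proportionally further into the future,
 * never by handing out more than the reserved bandwidth.
 */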
877 
878 /*
879  * Here we check if --at time t-- an entity (which is probably being
880  * [re]activated or, in general, enqueued) can use its remaining runtime
881  * and its current deadline _without_ exceeding the bandwidth it is
882  * assigned (function returns true if it can't). We are in fact applying
883  * one of the CBS rules: when a task wakes up, if the residual runtime
884  * over residual deadline fits within the allocated bandwidth, then we
885  * can keep the current (absolute) deadline and residual budget without
886  * disrupting the schedulability of the system. Otherwise, we should
887  * refill the runtime and set the deadline a period in the future,
888  * because keeping the current (absolute) deadline of the task would
889  * result in breaking guarantees promised to other tasks (refer to
890  * Documentation/scheduler/sched-deadline.rst for more information).
891  *
892  * This function returns true if:
893  *
894  *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
895  *
896  * IOW we can't recycle current parameters.
897  *
 898  * Notice that the bandwidth check is done against the deadline. For a
 899  * task with deadline equal to period this is the same as using
900  * dl_period instead of dl_deadline in the equation above.
901  */
902 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
903 {
904         u64 left, right;
905 
906         /*
907          * left and right are the two sides of the equation above,
908          * after a bit of shuffling to use multiplications instead
909          * of divisions.
910          *
911          * Note that none of the time values involved in the two
912          * multiplications are absolute: dl_deadline and dl_runtime
913          * are the relative deadline and the maximum runtime of each
914          * instance, runtime is the runtime left for the last instance
915          * and (deadline - t), since t is rq->clock, is the time left
916          * to the (absolute) deadline. Even if overflowing the u64 type
917          * is very unlikely to occur in both cases, here we scale down
918          * as we want to avoid that risk at all. Scaling down by 10
919          * means that we reduce granularity to 1us. We are fine with it,
920          * since this is only a true/false check and, anyway, thinking
 921          * of anything below microsecond resolution is actually fiction
922          * (but still we want to give the user that illusion >;).
923          */
924         left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
925         right = ((dl_se->deadline - t) >> DL_SCALE) *
926                 (pi_of(dl_se)->dl_runtime >> DL_SCALE);
927 
928         return dl_time_before(right, left);
929 }
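
/*
 * Worked example (illustrative numbers, DL_SCALE shifts ignored): a task
 * with dl_runtime = 10ms and dl_deadline = 100ms wakes up with runtime =
 * 5ms left and 20ms to go until its current absolute deadline:
 *
 *   left  = dl_deadline * runtime       = 100 * 5  = 500
 *   right = (deadline - t) * dl_runtime =  20 * 10 = 200
 *
 * right < left, i.e. 5/20 = 25% > 10/100 = 10%, so the residual parameters
 * cannot be recycled and the entity gets a fresh runtime and deadline.
 */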
930 
931 /*
 932  * Revised wakeup rule [1]: For self-suspending tasks, rather than
 933  * re-initializing the task's runtime and deadline, the revised wakeup
 934  * rule adjusts the task's runtime to avoid the task overrunning its
935  * density.
936  *
937  * Reasoning: a task may overrun the density if:
938  *    runtime / (deadline - t) > dl_runtime / dl_deadline
939  *
940  * Therefore, runtime can be adjusted to:
941  *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
942  *
 943  * This way, runtime will be the maximum amount the task can consume
 944  * before its deadline without breaking the density rule.
945  *
946  * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
947  * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
948  */
949 static void
950 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
951 {
952         u64 laxity = dl_se->deadline - rq_clock(rq);
953 
954         /*
955          * If the task has deadline < period, and the deadline is in the past,
956          * it should already be throttled before this check.
957          *
958          * See update_dl_entity() comments for further details.
959          */
960         WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
961 
962         dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
963 }
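
/*
 * Worked example (illustrative numbers): with dl_runtime = 10ms and
 * dl_deadline = 50ms, dl_density is 10/50 = 0.2 (in BW_SHIFT fixed point).
 * If the task wakes up with 30ms of laxity left to its old deadline, its
 * runtime is clipped to 0.2 * 30ms = 6ms, the most it can consume before
 * that deadline without exceeding its declared density.
 */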
964 
965 /*
966  * Regarding the deadline, a task with implicit deadline has a relative
967  * deadline == relative period. A task with constrained deadline has a
968  * relative deadline <= relative period.
969  *
970  * We support constrained deadline tasks. However, there are some restrictions
971  * applied only for tasks which do not have an implicit deadline. See
972  * update_dl_entity() to know more about such restrictions.
973  *
 974  * dl_is_implicit() returns true if the task has an implicit deadline.
975  */
976 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
977 {
978         return dl_se->dl_deadline == dl_se->dl_period;
979 }
980 
981 /*
982  * When a deadline entity is placed in the runqueue, its runtime and deadline
983  * might need to be updated. This is done by a CBS wake up rule. There are two
984  * different rules: 1) the original CBS; and 2) the Revisited CBS.
985  *
986  * When the task is starting a new period, the Original CBS is used. In this
987  * case, the runtime is replenished and a new absolute deadline is set.
988  *
 989  * When a task is queued before the beginning of the next period, using the
 990  * remaining runtime and deadline could make the entity overflow; see
 991  * dl_entity_overflow() to find more about runtime overflow. When such a case
992  * is detected, the runtime and deadline need to be updated.
993  *
994  * If the task has an implicit deadline, i.e., deadline == period, the Original
995  * CBS is applied. The runtime is replenished and a new absolute deadline is
996  * set, as in the previous cases.
997  *
998  * However, the Original CBS does not work properly for tasks with
999  * deadline < period, which are said to have a constrained deadline. By
1000  * applying the Original CBS, a constrained deadline task would be able to run
1001  * runtime/deadline in a period. With deadline < period, the task would
1002  * overrun the runtime/period allowed bandwidth, breaking the admission test.
1003  *
1004  * In order to prevent this misbehavior, the Revisited CBS is used for
1005  * constrained deadline tasks when a runtime overflow is detected. In the
1006  * Revisited CBS, rather than replenishing & setting a new absolute deadline,
1007  * the remaining runtime of the task is reduced to avoid runtime overflow.
1008  * Please refer to the comments in the update_dl_revised_wakeup() function to
1009  * find more about the Revised CBS rule.
1010  */
1011 static void update_dl_entity(struct sched_dl_entity *dl_se)
1012 {
1013         struct rq *rq = rq_of_dl_se(dl_se);
1014 
1015         if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
1016             dl_entity_overflow(dl_se, rq_clock(rq))) {
1017 
1018                 if (unlikely(!dl_is_implicit(dl_se) &&
1019                              !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1020                              !is_dl_boosted(dl_se))) {
1021                         update_dl_revised_wakeup(dl_se, rq);
1022                         return;
1023                 }
1024 
1025                 replenish_dl_new_period(dl_se, rq);
1026         }
1027 }
1028 
1029 static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
1030 {
1031         return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
1032 }
1033 
1034 /*
1035  * If the entity depleted all its runtime, and if we want it to sleep
1036  * while waiting for some new execution time to become available, we
1037  * set the bandwidth replenishment timer to the replenishment instant
1038  * and try to activate it.
1039  *
1040  * Notice that it is important for the caller to know if the timer
1041  * actually started or not (i.e., the replenishment instant is in
1042  * the future or in the past).
1043  */
1044 static int start_dl_timer(struct sched_dl_entity *dl_se)
1045 {
1046         struct hrtimer *timer = &dl_se->dl_timer;
1047         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1048         struct rq *rq = rq_of_dl_rq(dl_rq);
1049         ktime_t now, act;
1050         s64 delta;
1051 
1052         lockdep_assert_rq_held(rq);
1053 
1054         /*
1055          * We want the timer to fire at the deadline, but consider that
1056          * the deadline is expressed in rq->clock time and not in the
1057          * hrtimer's time base.
1058          */
1059         act = ns_to_ktime(dl_next_period(dl_se));
1060         now = hrtimer_cb_get_time(timer);
1061         delta = ktime_to_ns(now) - rq_clock(rq);
1062         act = ktime_add_ns(act, delta);
1063 
1064         /*
1065          * If the expiry time already passed, e.g., because the value
1066          * chosen as the deadline is too small, don't even try to
1067          * start the timer in the past!
1068          */
1069         if (ktime_us_delta(act, now) < 0)
1070                 return 0;
1071 
1072         /*
1073          * !enqueued will guarantee another callback; even if one is already in
1074          * progress. This ensures a balanced {get,put}_task_struct().
1075          *
1076          * The race against __run_timer() clearing the enqueued state is
1077          * harmless because we're holding task_rq()->lock, therefore the timer
1078          * expiring after we've done the check will wait on its task_rq_lock()
1079          * and observe our state.
1080          */
1081         if (!hrtimer_is_queued(timer)) {
1082                 if (!dl_server(dl_se))
1083                         get_task_struct(dl_task_of(dl_se));
1084                 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
1085         }
1086 
1087         return 1;
1088 }
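
/*
 * Example of the clock-base compensation above (illustrative numbers): say
 * the next replenishment instant is at rq_clock = 1000ms, the hrtimer base
 * currently reads 1003ms and rq_clock reads 998ms.  Then delta = 5ms and
 * act = 1000ms + 5ms = 1005ms in the hrtimer's own base, which corresponds
 * to the intended rq_clock instant of 1000ms.
 */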
1089 
1090 static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
1091 {
1092 #ifdef CONFIG_SMP
1093         /*
1094          * Queueing this task back might have overloaded rq, check if we need
1095          * to kick someone away.
1096          */
1097         if (has_pushable_dl_tasks(rq)) {
1098                 /*
1099                  * Nothing relies on rq->lock after this, so it's safe to drop
1100                  * rq->lock.
1101                  */
1102                 rq_unpin_lock(rq, rf);
1103                 push_dl_task(rq);
1104                 rq_repin_lock(rq, rf);
1105         }
1106 #endif
1107 }
1108 
1109 /*
1110  * This is the bandwidth enforcement timer callback. If here, we know
1111  * a task is not on its dl_rq, since the fact that the timer was running
1112  * means the task is throttled and needs a runtime replenishment.
1113  *
1114  * However, what we actually do depends on whether the task is active
1115  * (it is on its rq) or has been removed from there by a call to
1116  * dequeue_task_dl(). In the former case we must issue the runtime
1117  * replenishment and add the task back to the dl_rq; in the latter, we just
1118  * do nothing but clear dl_throttled, so that runtime and deadline
1119  * updating (and the queueing back to dl_rq) will be done by the
1120  * next call to enqueue_task_dl().
1121  */
1122 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1123 {
1124         struct sched_dl_entity *dl_se = container_of(timer,
1125                                                      struct sched_dl_entity,
1126                                                      dl_timer);
1127         struct task_struct *p;
1128         struct rq_flags rf;
1129         struct rq *rq;
1130 
1131         if (dl_server(dl_se)) {
1132                 struct rq *rq = rq_of_dl_se(dl_se);
1133                 struct rq_flags rf;
1134 
1135                 rq_lock(rq, &rf);
1136                 if (dl_se->dl_throttled) {
1137                         sched_clock_tick();
1138                         update_rq_clock(rq);
1139 
1140                         if (dl_se->server_has_tasks(dl_se)) {
1141                                 enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
1142                                 resched_curr(rq);
1143                                 __push_dl_task(rq, &rf);
1144                         } else {
1145                                 replenish_dl_entity(dl_se);
1146                         }
1147 
1148                 }
1149                 rq_unlock(rq, &rf);
1150 
1151                 return HRTIMER_NORESTART;
1152         }
1153 
1154         p = dl_task_of(dl_se);
1155         rq = task_rq_lock(p, &rf);
1156 
1157         /*
1158          * The task might have changed its scheduling policy to something
1159          * different than SCHED_DEADLINE (through switched_from_dl()).
1160          */
1161         if (!dl_task(p))
1162                 goto unlock;
1163 
1164         /*
1165          * The task might have been boosted by someone else and might be in the
1166          * boosting/deboosting path; it's not throttled.
1167          */
1168         if (is_dl_boosted(dl_se))
1169                 goto unlock;
1170 
1171         /*
1172          * Spurious timer due to start_dl_timer() race; or we already received
1173          * a replenishment from rt_mutex_setprio().
1174          */
1175         if (!dl_se->dl_throttled)
1176                 goto unlock;
1177 
1178         sched_clock_tick();
1179         update_rq_clock(rq);
1180 
1181         /*
1182          * If the throttle happened during sched-out; like:
1183          *
1184          *   schedule()
1185          *     deactivate_task()
1186          *       dequeue_task_dl()
1187          *         update_curr_dl()
1188          *           start_dl_timer()
1189          *         __dequeue_task_dl()
1190          *     prev->on_rq = 0;
1191          *
1192          * We can be both throttled and !queued. Replenish the counter
1193          * but do not enqueue -- wait for our wakeup to do that.
1194          */
1195         if (!task_on_rq_queued(p)) {
1196                 replenish_dl_entity(dl_se);
1197                 goto unlock;
1198         }
1199 
1200 #ifdef CONFIG_SMP
1201         if (unlikely(!rq->online)) {
1202                 /*
1203                  * If the runqueue is no longer available, migrate the
1204                  * task elsewhere. This necessarily changes rq.
1205                  */
1206                 lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
1207                 rq = dl_task_offline_migration(rq, p);
1208                 rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
1209                 update_rq_clock(rq);
1210 
1211                 /*
1212                  * Now that the task has been migrated to the new RQ and we
1213                  * have that locked, proceed as normal and enqueue the task
1214                  * there.
1215                  */
1216         }
1217 #endif
1218 
1219         enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1220         if (dl_task(rq->curr))
1221                 wakeup_preempt_dl(rq, p, 0);
1222         else
1223                 resched_curr(rq);
1224 
1225         __push_dl_task(rq, &rf);
1226 
1227 unlock:
1228         task_rq_unlock(rq, p, &rf);
1229 
1230         /*
1231          * This can free the task_struct, including this hrtimer, do not touch
1232          * anything related to that after this.
1233          */
1234         put_task_struct(p);
1235 
1236         return HRTIMER_NORESTART;
1237 }
1238 
1239 static void init_dl_task_timer(struct sched_dl_entity *dl_se)
1240 {
1241         struct hrtimer *timer = &dl_se->dl_timer;
1242 
1243         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1244         timer->function = dl_task_timer;
1245 }
1246 
1247 /*
1248  * During the activation, CBS checks if it can reuse the current task's
1249  * runtime and period. If the deadline of the task is in the past, CBS
1250  * cannot use the runtime, and so it replenishes the task. This rule
1251  * works fine for implicit deadline tasks (deadline == period), and the
1252  * CBS was designed for implicit deadline tasks. However, a task with
1253  * constrained deadline (deadline < period) might be awakened after the
1254  * deadline, but before the next period. In this case, replenishing the
1255  * task would allow it to run for runtime / deadline. As in this case
1256  * deadline < period, CBS enables a task to run for more than the
1257  * runtime / period. In a very loaded system, this can cause a domino
1258  * effect, making other tasks miss their deadlines.
1259  *
1260  * To avoid this problem, in the activation of a constrained deadline
1261  * task after the deadline but before the next period, throttle the
1262  * task and set the replenishing timer to the beginning of the next period,
1263  * unless it is boosted.
1264  */
1265 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1266 {
1267         struct rq *rq = rq_of_dl_se(dl_se);
1268 
1269         if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1270             dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1271                 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
1272                         return;
1273                 dl_se->dl_throttled = 1;
1274                 if (dl_se->runtime > 0)
1275                         dl_se->runtime = 0;
1276         }
1277 }
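
/*
 * Example (illustrative numbers): a constrained task with dl_deadline =
 * 50ms and dl_period = 100ms, whose current period started at t = 0, wakes
 * up at t = 70ms.  That is after its absolute deadline (50ms) but before
 * the next period (dl_next_period() = 100ms), so instead of being
 * replenished it is throttled with zero runtime and the replenishment
 * timer is armed for t = 100ms (unless it is boosted).
 */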
1278 
1279 static
1280 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1281 {
1282         return (dl_se->runtime <= 0);
1283 }
1284 
1285 /*
1286  * This function implements the GRUB accounting rule. According to the
1287  * GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt",
1288  * but as "dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt",
1289  * where u is the utilization of the task, Umax is the maximum reclaimable
1290  * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1291  * as the difference between the "total runqueue utilization" and the
1292  * "runqueue active utilization", and Uextra is the (per runqueue) extra
1293  * reclaimable utilization.
1294  * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
1295  * by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
1296  * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
1297  * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1298  * Since delta is a 64 bit variable, to have an overflow its value should be
1299  * larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
1300  * not an issue here.
1301  */
1302 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1303 {
1304         u64 u_act;
1305         u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1306 
1307         /*
1308          * Instead of computing max{u, (u_max - u_inact - u_extra)}, we
1309          * compare u_inact + u_extra with u_max - u, because u_inact + u_extra
1310          * can be larger than u_max. So, u_max - u_inact - u_extra would be
1311          * negative leading to wrong results.
1312          */
1313         if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
1314                 u_act = dl_se->dl_bw;
1315         else
1316                 u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;
1317 
1318         u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
1319         return (delta * u_act) >> BW_SHIFT;
1320 }
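
/*
 * Worked example (illustrative numbers, utilizations as plain fractions,
 * assuming max_bw = 1.0 and bw_ratio = 1): on a runqueue with this_bw =
 * 0.8, running_bw = 0.5 and extra_bw = 0.2, a reclaiming task with
 * dl_bw = 0.4 sees
 *
 *   u_inact = 0.8 - 0.5 = 0.3
 *   u_inact + extra_bw = 0.5  <=  max_bw - dl_bw = 0.6
 *   u_act   = 1.0 - 0.3 - 0.2 = 0.5
 *
 * so dq = -0.5 dt: the task consumes its reserved runtime at half the
 * wall-clock rate, reclaiming bandwidth left unused on this runqueue.
 */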
1321 
1322 static inline void
1323 update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1324                         int flags);
1325 static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
1326 {
1327         s64 scaled_delta_exec;
1328 
1329         if (unlikely(delta_exec <= 0)) {
1330                 if (unlikely(dl_se->dl_yielded))
1331                         goto throttle;
1332                 return;
1333         }
1334 
1335         if (dl_entity_is_special(dl_se))
1336                 return;
1337 
1338         /*
1339          * For tasks that participate in GRUB, we implement GRUB-PA: the
1340          * spare reclaimed bandwidth is used to clock down frequency.
1341          *
1342          * For the others, we still need to scale reservation parameters
1343          * according to current frequency and CPU maximum capacity.
1344          */
1345         if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1346                 scaled_delta_exec = grub_reclaim(delta_exec, rq, dl_se);
1347         } else {
1348                 int cpu = cpu_of(rq);
1349                 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1350                 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1351 
1352                 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1353                 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1354         }
1355 
1356         dl_se->runtime -= scaled_delta_exec;
1357 
1358 throttle:
1359         if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1360                 dl_se->dl_throttled = 1;
1361 
1362                 /* If requested, inform the user about runtime overruns. */
1363                 if (dl_runtime_exceeded(dl_se) &&
1364                     (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1365                         dl_se->dl_overrun = 1;
1366 
1367                 dequeue_dl_entity(dl_se, 0);
1368                 if (!dl_server(dl_se)) {
1369                         update_stats_dequeue_dl(&rq->dl, dl_se, 0);
1370                         dequeue_pushable_dl_task(rq, dl_task_of(dl_se));
1371                 }
1372 
1373                 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) {
1374                         if (dl_server(dl_se))
1375                                 enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
1376                         else
1377                                 enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
1378                 }
1379 
1380                 if (!is_leftmost(dl_se, &rq->dl))
1381                         resched_curr(rq);
1382         }
1383 
1384         /*
1385          * Because -- for now -- we share the rt bandwidth, we need to
1386          * account our runtime there too, otherwise actual rt tasks
1387          * would be able to exceed the shared quota.
1388          *
1389          * Account to the root rt group for now.
1390          *
1391          * The solution we're working towards is having the RT groups scheduled
1392          * using deadline servers -- however there's a few nasties to figure
1393          * out before that can happen.
1394          */
1395         if (rt_bandwidth_enabled()) {
1396                 struct rt_rq *rt_rq = &rq->rt;
1397 
1398                 raw_spin_lock(&rt_rq->rt_runtime_lock);
1399                 /*
1400                  * We'll let actual RT tasks worry about the overflow here, we
1401                  * have our own CBS to keep us in line; only account when RT
1402                  * bandwidth is relevant.
1403                  */
1404                 if (sched_rt_bandwidth_account(rt_rq))
1405                         rt_rq->rt_time += delta_exec;
1406                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1407         }
1408 }
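
/*
 * Example of the frequency/capacity scaling above (illustrative numbers):
 * on a little CPU running at half speed, scale_freq = 512 and scale_cpu =
 * 512 (out of SCHED_CAPACITY_SCALE = 1024), so 4ms of wall-clock execution
 * is charged as 4ms * 512/1024 * 512/1024 = 1ms of runtime: the slower the
 * CPU runs, the less reservation each wall-clock millisecond consumes.
 */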
1409 
1410 void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
1411 {
1412         update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
1413 }
1414 
1415 void dl_server_start(struct sched_dl_entity *dl_se)
1416 {
1417         if (!dl_server(dl_se)) {
1418                 dl_se->dl_server = 1;
1419                 setup_new_dl_entity(dl_se);
1420         }
1421         enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
1422 }
1423 
1424 void dl_server_stop(struct sched_dl_entity *dl_se)
1425 {
1426         dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
1427 }
1428 
1429 void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
1430                     dl_server_has_tasks_f has_tasks,
1431                     dl_server_pick_f pick)
1432 {
1433         dl_se->rq = rq;
1434         dl_se->server_has_tasks = has_tasks;
1435         dl_se->server_pick = pick;
1436 }
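
/*
 * Sketch of how a scheduling class might wire up a deadline server with
 * the hooks above (hypothetical names, not taken from this file):
 *
 *   void my_class_server_init(struct rq *rq)
 *   {
 *           dl_server_init(&rq->my_server, rq,
 *                          my_server_has_tasks,
 *                          my_server_pick);
 *   }
 *
 * dl_server_start()/dl_server_stop() then enqueue/dequeue the server's
 * sched_dl_entity as the client class gains or loses runnable work.
 */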
1437 
1438 /*
1439  * Update the current task's runtime statistics (provided it is still
1440  * a -deadline task and has not been removed from the dl_rq).
1441  */
1442 static void update_curr_dl(struct rq *rq)
1443 {
1444         struct task_struct *curr = rq->curr;
1445         struct sched_dl_entity *dl_se = &curr->dl;
1446         s64 delta_exec;
1447 
1448         if (!dl_task(curr) || !on_dl_rq(dl_se))
1449                 return;
1450 
1451         /*
1452          * Consumed budget is computed considering the time as
1453          * observed by schedulable tasks (excluding time spent
1454          * in hardirq context, etc.). Deadlines are instead
1455          * computed using hard walltime. This seems to be the more
1456          * natural solution, but the full ramifications of this
1457          * approach need further study.
1458          */
1459         delta_exec = update_curr_common(rq);
1460         update_curr_dl_se(rq, dl_se, delta_exec);
1461 }
1462 
1463 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1464 {
1465         struct sched_dl_entity *dl_se = container_of(timer,
1466                                                      struct sched_dl_entity,
1467                                                      inactive_timer);
1468         struct task_struct *p = NULL;
1469         struct rq_flags rf;
1470         struct rq *rq;
1471 
1472         if (!dl_server(dl_se)) {
1473                 p = dl_task_of(dl_se);
1474                 rq = task_rq_lock(p, &rf);
1475         } else {
1476                 rq = dl_se->rq;
1477                 rq_lock(rq, &rf);
1478         }
1479 
1480         sched_clock_tick();
1481         update_rq_clock(rq);
1482 
1483         if (dl_server(dl_se))
1484                 goto no_task;
1485 
1486         if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
1487                 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1488 
1489                 if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
1490                         sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1491                         sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1492                         dl_se->dl_non_contending = 0;
1493                 }
1494 
1495                 raw_spin_lock(&dl_b->lock);
1496                 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1497                 raw_spin_unlock(&dl_b->lock);
1498                 __dl_clear_params(dl_se);
1499 
1500                 goto unlock;
1501         }
1502 
1503 no_task:
1504         if (dl_se->dl_non_contending == 0)
1505                 goto unlock;
1506 
1507         sub_running_bw(dl_se, &rq->dl);
1508         dl_se->dl_non_contending = 0;
1509 unlock:
1510 
1511         if (!dl_server(dl_se)) {
1512                 task_rq_unlock(rq, p, &rf);
1513                 put_task_struct(p);
1514         } else {
1515                 rq_unlock(rq, &rf);
1516         }
1517 
1518         return HRTIMER_NORESTART;
1519 }
1520 
1521 static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1522 {
1523         struct hrtimer *timer = &dl_se->inactive_timer;
1524 
1525         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1526         timer->function = inactive_task_timer;
1527 }
1528 
1529 #define __node_2_dle(node) \
1530         rb_entry((node), struct sched_dl_entity, rb_node)
1531 
1532 #ifdef CONFIG_SMP
1533 
1534 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1535 {
1536         struct rq *rq = rq_of_dl_rq(dl_rq);
1537 
1538         if (dl_rq->earliest_dl.curr == 0 ||
1539             dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1540                 if (dl_rq->earliest_dl.curr == 0)
1541                         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
1542                 dl_rq->earliest_dl.curr = deadline;
1543                 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1544         }
1545 }
1546 
1547 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1548 {
1549         struct rq *rq = rq_of_dl_rq(dl_rq);
1550 
1551         /*
1552          * Since we may have removed our earliest (and/or next earliest)
1553          * task, we must recompute them.
1554          */
1555         if (!dl_rq->dl_nr_running) {
1556                 dl_rq->earliest_dl.curr = 0;
1557                 dl_rq->earliest_dl.next = 0;
1558                 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1559                 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1560         } else {
1561                 struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
1562                 struct sched_dl_entity *entry = __node_2_dle(leftmost);
1563 
1564                 dl_rq->earliest_dl.curr = entry->deadline;
1565                 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1566         }
1567 }
1568 
1569 #else
1570 
1571 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1572 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1573 
1574 #endif /* CONFIG_SMP */
1575 
1576 static inline
1577 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1578 {
1579         u64 deadline = dl_se->deadline;
1580 
1581         dl_rq->dl_nr_running++;
1582         add_nr_running(rq_of_dl_rq(dl_rq), 1);
1583 
1584         inc_dl_deadline(dl_rq, deadline);
1585 }
1586 
1587 static inline
1588 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1589 {
1590         WARN_ON(!dl_rq->dl_nr_running);
1591         dl_rq->dl_nr_running--;
1592         sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1593 
1594         dec_dl_deadline(dl_rq, dl_se->deadline);
1595 }
1596 
1597 static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
1598 {
1599         return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
1600 }
1601 
1602 static __always_inline struct sched_statistics *
1603 __schedstats_from_dl_se(struct sched_dl_entity *dl_se)
1604 {
1605         if (!schedstat_enabled())
1606                 return NULL;
1607 
1608         if (dl_server(dl_se))
1609                 return NULL;
1610 
1611         return &dl_task_of(dl_se)->stats;
1612 }
1613 
1614 static inline void
1615 update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1616 {
1617         struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
1618         if (stats)
1619                 __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1620 }
1621 
1622 static inline void
1623 update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1624 {
1625         struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
1626         if (stats)
1627                 __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1628 }
1629 
1630 static inline void
1631 update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1632 {
1633         struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
1634         if (stats)
1635                 __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1636 }
1637 
1638 static inline void
1639 update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1640                         int flags)
1641 {
1642         if (!schedstat_enabled())
1643                 return;
1644 
1645         if (flags & ENQUEUE_WAKEUP)
1646                 update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
1647 }
1648 
1649 static inline void
1650 update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1651                         int flags)
1652 {
1653         struct task_struct *p = dl_task_of(dl_se);
1654 
1655         if (!schedstat_enabled())
1656                 return;
1657 
1658         if ((flags & DEQUEUE_SLEEP)) {
1659                 unsigned int state;
1660 
1661                 state = READ_ONCE(p->__state);
1662                 if (state & TASK_INTERRUPTIBLE)
1663                         __schedstat_set(p->stats.sleep_start,
1664                                         rq_clock(rq_of_dl_rq(dl_rq)));
1665 
1666                 if (state & TASK_UNINTERRUPTIBLE)
1667                         __schedstat_set(p->stats.block_start,
1668                                         rq_clock(rq_of_dl_rq(dl_rq)));
1669         }
1670 }
1671 
1672 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1673 {
1674         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1675 
1676         WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));
1677 
1678         rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
1679 
1680         inc_dl_tasks(dl_se, dl_rq);
1681 }
1682 
1683 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1684 {
1685         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1686 
1687         if (RB_EMPTY_NODE(&dl_se->rb_node))
1688                 return;
1689 
1690         rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1691 
1692         RB_CLEAR_NODE(&dl_se->rb_node);
1693 
1694         dec_dl_tasks(dl_se, dl_rq);
1695 }
1696 
1697 static void
1698 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1699 {
1700         WARN_ON_ONCE(on_dl_rq(dl_se));
1701 
1702         update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
1703 
1704         /*
1705          * Check if a constrained deadline task was activated
1706          * after the deadline but before the next period.
1707          * If that is the case, the task will be throttled and
1708          * the replenishment timer will be set to the next period.
1709          */
1710         if (!dl_se->dl_throttled && !dl_is_implicit(dl_se))
1711                 dl_check_constrained_dl(dl_se);
1712 
1713         if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) {
1714                 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1715 
1716                 add_rq_bw(dl_se, dl_rq);
1717                 add_running_bw(dl_se, dl_rq);
1718         }
1719 
1720         /*
1721          * If p is throttled, we do not enqueue it. In fact, if it exhausted
1722          * its budget it needs a replenishment and, since it now is on
1723          * its rq, the bandwidth timer callback (which clearly has not
1724          * run yet) will take care of this.
1725          * However, the active utilization does not depend on whether the
1726          * task is on the runqueue or not (but rather on the task's state -
1727          * in GRUB parlance, "inactive" vs "active contending").
1728          * In other words, even if a task is throttled its utilization must
1729          * be counted in the active utilization; hence, we need to call
1730          * add_running_bw().
1731          */
1732         if (dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1733                 if (flags & ENQUEUE_WAKEUP)
1734                         task_contending(dl_se, flags);
1735 
1736                 return;
1737         }
1738 
1739         /*
1740          * If this is a wakeup or a new instance, the scheduling
1741          * parameters of the task might need updating. Otherwise,
1742          * we want a replenishment of its runtime.
1743          */
1744         if (flags & ENQUEUE_WAKEUP) {
1745                 task_contending(dl_se, flags);
1746                 update_dl_entity(dl_se);
1747         } else if (flags & ENQUEUE_REPLENISH) {
1748                 replenish_dl_entity(dl_se);
1749         } else if ((flags & ENQUEUE_RESTORE) &&
1750                    dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
1751                 setup_new_dl_entity(dl_se);
1752         }
1753 
1754         __enqueue_dl_entity(dl_se);
1755 }
1756 
1757 static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1758 {
1759         __dequeue_dl_entity(dl_se);
1760 
1761         if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) {
1762                 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1763 
1764                 sub_running_bw(dl_se, dl_rq);
1765                 sub_rq_bw(dl_se, dl_rq);
1766         }
1767 
1768         /*
1769          * This check allows us to start the inactive timer (or to immediately
1770          * decrease the active utilization, if needed) in two cases:
1771          * when the task blocks and when it is terminating
1772          * (p->state == TASK_DEAD). We can handle the two cases in the same
1773          * way, because from GRUB's point of view the same thing is happening
1774          * (the task moves from "active contending" to "active non contending"
1775          * or "inactive")
1776          */
1777         if (flags & DEQUEUE_SLEEP)
1778                 task_non_contending(dl_se);
1779 }
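
To recap the GRUB terminology used in the two comments above: a task is
"active contending" while it is runnable; when it blocks (or dies) it becomes
"active non contending" for as long as its 0-lag time lies in the future, with
the inactive timer armed and its bandwidth still accounted in running_bw; once
the 0-lag time passes, inactive_task_timer() moves it to "inactive" and its
bandwidth is reclaimed.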
1780 
1781 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1782 {
1783         if (is_dl_boosted(&p->dl)) {
1784                 /*
1785                  * Because of delays in the detection of the overrun of a
1786                  * thread's runtime, it might be the case that a thread
1787                  * goes to sleep on an rt mutex with negative runtime. As
1788                  * a consequence, the thread will be throttled.
1789                  *
1790                  * While waiting for the mutex, this thread can also be
1791                  * boosted via PI, resulting in a thread that is throttled
1792                  * and boosted at the same time.
1793                  *
1794                  * In this case, the boost overrides the throttle.
1795                  */
1796                 if (p->dl.dl_throttled) {
1797                         /*
1798                          * The replenish timer needs to be canceled. No
1799                          * problem if it fires concurrently: boosted threads
1800                          * are ignored in dl_task_timer().
1801                          *
1802                          * If the timer callback was running (hrtimer_try_to_cancel == -1),
1803                          * it will eventually call put_task_struct().
1804                          */
1805                         if (hrtimer_try_to_cancel(&p->dl.dl_timer) == 1 &&
1806                             !dl_server(&p->dl))
1807                                 put_task_struct(p);
1808                         p->dl.dl_throttled = 0;
1809                 }
1810         } else if (!dl_prio(p->normal_prio)) {
1811                 /*
1812                  * Special case in which we have a !SCHED_DEADLINE task that is going
1813                  * to be deboosted, but exceeds its runtime while doing so. No point in
1814                  * replenishing it, as it's going to return to its original
1815                  * scheduling class after this. If it has been throttled, we need to
1816                  * clear the flag, otherwise the task may wake up as throttled after
1817                  * being boosted again with no means to replenish the runtime and clear
1818                  * the throttle.
1819                  */
1820                 p->dl.dl_throttled = 0;
1821                 if (!(flags & ENQUEUE_REPLENISH))
1822                         printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
1823                                              task_pid_nr(p));
1824 
1825                 return;
1826         }
1827 
1828         check_schedstat_required();
1829         update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
1830 
1831         if (p->on_rq == TASK_ON_RQ_MIGRATING)
1832                 flags |= ENQUEUE_MIGRATING;
1833 
1834         enqueue_dl_entity(&p->dl, flags);
1835 
1836         if (dl_server(&p->dl))
1837                 return;
1838 
1839         if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
1840                 enqueue_pushable_dl_task(rq, p);
1841 }
1842 
1843 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1844 {
1845         update_curr_dl(rq);
1846 
1847         if (p->on_rq == TASK_ON_RQ_MIGRATING)
1848                 flags |= DEQUEUE_MIGRATING;
1849 
1850         dequeue_dl_entity(&p->dl, flags);
1851         if (!p->dl.dl_throttled && !dl_server(&p->dl))
1852                 dequeue_pushable_dl_task(rq, p);
1853 }
1854 
1855 /*
1856  * Yield task semantic for -deadline tasks is:
1857  *
1858  *   get off the CPU until our next instance, with
1859  *   a new runtime. This is of little use now, since we
1860  *   don't have a bandwidth reclaiming mechanism. Anyway,
1861  *   bandwidth reclaiming is planned for the future, and
1862  *   yield_task_dl will indicate that some spare budget
1863  *   is available for other task instances to use.
1864  */
1865 static void yield_task_dl(struct rq *rq)
1866 {
1867         /*
1868          * We make the task go to sleep until its current deadline by
1869          * forcing its runtime to zero. This way, update_curr_dl() stops
1870          * it and the bandwidth timer will wake it up and will give it
1871          * new scheduling parameters (thanks to dl_yielded=1).
1872          */
1873         rq->curr->dl.dl_yielded = 1;
1874 
1875         update_rq_clock(rq);
1876         update_curr_dl(rq);
1877         /*
1878          * Tell update_rq_clock() that we've just updated,
1879          * so we don't do microscopic update in schedule()
1880          * and double the fastpath cost.
1881          */
1882         rq_clock_skip_update(rq);
1883 }
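
To make the yield semantics above concrete, here is a minimal userspace sketch
(illustrative only, error handling mostly omitted): a periodic SCHED_DEADLINE
job ends each instance with sched_yield(), which, through dl_yielded, gives up
the residual runtime until the next replenishment. The struct below mirrors the
uapi struct sched_attr layout but is declared under a local name so the snippet
does not depend on libc exposing it; the policy value 6 is SCHED_DEADLINE.

#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

struct dl_attr {			/* mirrors uapi struct sched_attr */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* runtime/deadline/period in ns */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct dl_attr attr = {
		.size           = sizeof(attr),
		.sched_policy   = 6,			/* SCHED_DEADLINE */
		.sched_runtime  = 10ULL * 1000 * 1000,	/* 10 ms */
		.sched_deadline = 100ULL * 1000 * 1000,	/* 100 ms */
		.sched_period   = 100ULL * 1000 * 1000,	/* 100 ms */
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		return 1;

	for (;;) {
		/* ... do at most ~10 ms of work per instance ... */
		sched_yield();	/* hand back the residual runtime until the next period */
	}
}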
1884 
1885 #ifdef CONFIG_SMP
1886 
1887 static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
1888                                                  struct rq *rq)
1889 {
1890         return (!rq->dl.dl_nr_running ||
1891                 dl_time_before(p->dl.deadline,
1892                                rq->dl.earliest_dl.curr));
1893 }
1894 
1895 static int find_later_rq(struct task_struct *task);
1896 
1897 static int
1898 select_task_rq_dl(struct task_struct *p, int cpu, int flags)
1899 {
1900         struct task_struct *curr;
1901         bool select_rq;
1902         struct rq *rq;
1903 
1904         if (!(flags & WF_TTWU))
1905                 goto out;
1906 
1907         rq = cpu_rq(cpu);
1908 
1909         rcu_read_lock();
1910         curr = READ_ONCE(rq->curr); /* unlocked access */
1911 
1912         /*
1913          * If we are dealing with a -deadline task, we must
1914          * decide where to wake it up.
1915          * If it has a later deadline and the current task
1916          * on this rq can't move (provided the waking task
1917          * can!) we prefer to send it somewhere else. On the
1918          * other hand, if it has a shorter deadline, we
1919          * try to make it stay here, as it might be important.
1920          */
1921         select_rq = unlikely(dl_task(curr)) &&
1922                     (curr->nr_cpus_allowed < 2 ||
1923                      !dl_entity_preempt(&p->dl, &curr->dl)) &&
1924                     p->nr_cpus_allowed > 1;
1925 
1926         /*
1927          * Take the capacity of the CPU into account to
1928          * ensure it fits the requirement of the task.
1929          */
1930         if (sched_asym_cpucap_active())
1931                 select_rq |= !dl_task_fits_capacity(p, cpu);
1932 
1933         if (select_rq) {
1934                 int target = find_later_rq(p);
1935 
1936                 if (target != -1 &&
1937                     dl_task_is_earliest_deadline(p, cpu_rq(target)))
1938                         cpu = target;
1939         }
1940         rcu_read_unlock();
1941 
1942 out:
1943         return cpu;
1944 }
1945 
1946 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1947 {
1948         struct rq_flags rf;
1949         struct rq *rq;
1950 
1951         if (READ_ONCE(p->__state) != TASK_WAKING)
1952                 return;
1953 
1954         rq = task_rq(p);
1955         /*
1956          * Since p->state == TASK_WAKING, set_task_cpu() has been called
1957          * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1958          * rq->lock is not... So, lock it
1959          */
1960         rq_lock(rq, &rf);
1961         if (p->dl.dl_non_contending) {
1962                 update_rq_clock(rq);
1963                 sub_running_bw(&p->dl, &rq->dl);
1964                 p->dl.dl_non_contending = 0;
1965                 /*
1966                  * If the timer handler is currently running and the
1967                  * timer cannot be canceled, inactive_task_timer()
1968                  * will see that dl_non_contending is not set, and
1969                  * will not touch the rq's active utilization,
1970                  * so we are still safe.
1971                  */
1972                 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1973                         put_task_struct(p);
1974         }
1975         sub_rq_bw(&p->dl, &rq->dl);
1976         rq_unlock(rq, &rf);
1977 }
1978 
1979 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1980 {
1981         /*
1982          * Current can't be migrated, useless to reschedule,
1983          * let's hope p can move out.
1984          */
1985         if (rq->curr->nr_cpus_allowed == 1 ||
1986             !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1987                 return;
1988 
1989         /*
1990          * p is migratable, so let's not schedule it and
1991          * see if it is pushed or pulled somewhere else.
1992          */
1993         if (p->nr_cpus_allowed != 1 &&
1994             cpudl_find(&rq->rd->cpudl, p, NULL))
1995                 return;
1996 
1997         resched_curr(rq);
1998 }
1999 
2000 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
2001 {
2002         if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
2003                 /*
2004                  * This is OK, because current is on_cpu, which avoids it being
2005                  * picked for load-balance; preemption/IRQs are still
2006                  * disabled, avoiding further scheduler activity on it; and we've
2007                  * not yet started the picking loop.
2008                  */
2009                 rq_unpin_lock(rq, rf);
2010                 pull_dl_task(rq);
2011                 rq_repin_lock(rq, rf);
2012         }
2013 
2014         return sched_stop_runnable(rq) || sched_dl_runnable(rq);
2015 }
2016 #endif /* CONFIG_SMP */
2017 
2018 /*
2019  * Only called when both the current and waking task are -deadline
2020  * tasks.
2021  */
2022 static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
2023                                   int flags)
2024 {
2025         if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
2026                 resched_curr(rq);
2027                 return;
2028         }
2029 
2030 #ifdef CONFIG_SMP
2031         /*
2032          * In the unlikely case current and p have the same deadline
2033          * let us try to decide what's the best thing to do...
2034          */
2035         if ((p->dl.deadline == rq->curr->dl.deadline) &&
2036             !test_tsk_need_resched(rq->curr))
2037                 check_preempt_equal_dl(rq, p);
2038 #endif /* CONFIG_SMP */
2039 }
2040 
2041 #ifdef CONFIG_SCHED_HRTICK
2042 static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
2043 {
2044         hrtick_start(rq, dl_se->runtime);
2045 }
2046 #else /* !CONFIG_SCHED_HRTICK */
2047 static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
2048 {
2049 }
2050 #endif
2051 
2052 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
2053 {
2054         struct sched_dl_entity *dl_se = &p->dl;
2055         struct dl_rq *dl_rq = &rq->dl;
2056 
2057         p->se.exec_start = rq_clock_task(rq);
2058         if (on_dl_rq(&p->dl))
2059                 update_stats_wait_end_dl(dl_rq, dl_se);
2060 
2061         /* You can't push away the running task */
2062         dequeue_pushable_dl_task(rq, p);
2063 
2064         if (!first)
2065                 return;
2066 
2067         if (rq->curr->sched_class != &dl_sched_class)
2068                 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2069 
2070         deadline_queue_push_tasks(rq);
2071 }
2072 
2073 static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
2074 {
2075         struct rb_node *left = rb_first_cached(&dl_rq->root);
2076 
2077         if (!left)
2078                 return NULL;
2079 
2080         return __node_2_dle(left);
2081 }
2082 
2083 static struct task_struct *pick_task_dl(struct rq *rq)
2084 {
2085         struct sched_dl_entity *dl_se;
2086         struct dl_rq *dl_rq = &rq->dl;
2087         struct task_struct *p;
2088 
2089 again:
2090         if (!sched_dl_runnable(rq))
2091                 return NULL;
2092 
2093         dl_se = pick_next_dl_entity(dl_rq);
2094         WARN_ON_ONCE(!dl_se);
2095 
2096         if (dl_server(dl_se)) {
2097                 p = dl_se->server_pick(dl_se);
2098                 if (!p) {
2099                         WARN_ON_ONCE(1);
2100                         dl_se->dl_yielded = 1;
2101                         update_curr_dl_se(rq, dl_se, 0);
2102                         goto again;
2103                 }
2104                 p->dl_server = dl_se;
2105         } else {
2106                 p = dl_task_of(dl_se);
2107         }
2108 
2109         return p;
2110 }
2111 
2112 static struct task_struct *pick_next_task_dl(struct rq *rq)
2113 {
2114         struct task_struct *p;
2115 
2116         p = pick_task_dl(rq);
2117         if (!p)
2118                 return p;
2119 
2120         if (!p->dl_server)
2121                 set_next_task_dl(rq, p, true);
2122 
2123         if (hrtick_enabled(rq))
2124                 start_hrtick_dl(rq, &p->dl);
2125 
2126         return p;
2127 }
2128 
2129 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
2130 {
2131         struct sched_dl_entity *dl_se = &p->dl;
2132         struct dl_rq *dl_rq = &rq->dl;
2133 
2134         if (on_dl_rq(&p->dl))
2135                 update_stats_wait_start_dl(dl_rq, dl_se);
2136 
2137         update_curr_dl(rq);
2138 
2139         update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2140         if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
2141                 enqueue_pushable_dl_task(rq, p);
2142 }
2143 
2144 /*
2145  * scheduler tick hitting a task of our scheduling class.
2146  *
2147  * NOTE: This function can be called remotely by the tick offload that
2148  * goes along full dynticks. Therefore no local assumption can be made
2149  * and everything must be accessed through the @rq and @curr passed in
2150  * parameters.
2151  */
2152 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
2153 {
2154         update_curr_dl(rq);
2155 
2156         update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2157         /*
2158          * Even when we have runtime, update_curr_dl() might have resulted in us
2159          * not being the leftmost task anymore. In that case NEED_RESCHED will
2160          * be set and schedule() will start a new hrtick for the next task.
2161          */
2162         if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
2163             is_leftmost(&p->dl, &rq->dl))
2164                 start_hrtick_dl(rq, &p->dl);
2165 }
2166 
2167 static void task_fork_dl(struct task_struct *p)
2168 {
2169         /*
2170          * SCHED_DEADLINE tasks cannot fork and this is achieved through
2171          * sched_fork()
2172          */
2173 }
2174 
2175 #ifdef CONFIG_SMP
2176 
2177 /* Only try algorithms three times */
2178 #define DL_MAX_TRIES 3
2179 
2180 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
2181 {
2182         if (!task_on_cpu(rq, p) &&
2183             cpumask_test_cpu(cpu, &p->cpus_mask))
2184                 return 1;
2185         return 0;
2186 }
2187 
2188 /*
2189  * Return the earliest task on the rq's pushable list that is suitable to be
2190  * executed on the given CPU, or NULL otherwise:
2191  */
2192 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2193 {
2194         struct task_struct *p = NULL;
2195         struct rb_node *next_node;
2196 
2197         if (!has_pushable_dl_tasks(rq))
2198                 return NULL;
2199 
2200         next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
2201 
2202 next_node:
2203         if (next_node) {
2204                 p = __node_2_pdl(next_node);
2205 
2206                 if (pick_dl_task(rq, p, cpu))
2207                         return p;
2208 
2209                 next_node = rb_next(next_node);
2210                 goto next_node;
2211         }
2212 
2213         return NULL;
2214 }
2215 
2216 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2217 
2218 static int find_later_rq(struct task_struct *task)
2219 {
2220         struct sched_domain *sd;
2221         struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
2222         int this_cpu = smp_processor_id();
2223         int cpu = task_cpu(task);
2224 
2225         /* Make sure the mask is initialized first */
2226         if (unlikely(!later_mask))
2227                 return -1;
2228 
2229         if (task->nr_cpus_allowed == 1)
2230                 return -1;
2231 
2232         /*
2233          * We have to consider system topology and task affinity
2234          * first, then we can look for a suitable CPU.
2235          */
2236         if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
2237                 return -1;
2238 
2239         /*
2240          * If we are here, some targets have been found, including
2241          * the most suitable one: among the runqueues whose current
2242          * tasks have later deadlines than this task's, the rq with
2243          * the latest such deadline.
2244          *
2245          * Now we check how well this matches the task's
2246          * affinity and the system topology.
2247          *
2248          * The last CPU where the task ran is our first
2249          * guess, since it is most likely cache-hot there.
2250          */
2251         if (cpumask_test_cpu(cpu, later_mask))
2252                 return cpu;
2253         /*
2254          * Check if this_cpu is to be skipped (i.e., it is
2255          * not in the mask) or not.
2256          */
2257         if (!cpumask_test_cpu(this_cpu, later_mask))
2258                 this_cpu = -1;
2259 
2260         rcu_read_lock();
2261         for_each_domain(cpu, sd) {
2262                 if (sd->flags & SD_WAKE_AFFINE) {
2263                         int best_cpu;
2264 
2265                         /*
2266                          * If possible, preempting this_cpu is
2267                          * cheaper than migrating.
2268                          */
2269                         if (this_cpu != -1 &&
2270                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2271                                 rcu_read_unlock();
2272                                 return this_cpu;
2273                         }
2274 
2275                         best_cpu = cpumask_any_and_distribute(later_mask,
2276                                                               sched_domain_span(sd));
2277                         /*
2278                          * Last chance: if a CPU that is in both later_mask
2279                          * and the current sd span is valid, that becomes our
2280                          * choice. Of course, the latest possible CPU is
2281                          * already under consideration through later_mask.
2282                          */
2283                         if (best_cpu < nr_cpu_ids) {
2284                                 rcu_read_unlock();
2285                                 return best_cpu;
2286                         }
2287                 }
2288         }
2289         rcu_read_unlock();
2290 
2291         /*
2292          * At this point, all our guesses have failed; we just return
2293          * 'something' and let the caller sort things out.
2294          */
2295         if (this_cpu != -1)
2296                 return this_cpu;
2297 
2298         cpu = cpumask_any_distribute(later_mask);
2299         if (cpu < nr_cpu_ids)
2300                 return cpu;
2301 
2302         return -1;
2303 }
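
In short, the preference order implemented above is: (1) the CPU the task last
ran on, if it is in later_mask (likely cache-hot); (2) this_cpu, if it is in
later_mask and shares an SD_WAKE_AFFINE domain with that CPU (preempting
locally is cheaper than migrating); (3) any later_mask CPU inside such a
domain; and (4) this_cpu or any other later_mask CPU as a last resort.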
2304 
2305 /* Locks the rq it finds */
2306 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2307 {
2308         struct rq *later_rq = NULL;
2309         int tries;
2310         int cpu;
2311 
2312         for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2313                 cpu = find_later_rq(task);
2314 
2315                 if ((cpu == -1) || (cpu == rq->cpu))
2316                         break;
2317 
2318                 later_rq = cpu_rq(cpu);
2319 
2320                 if (!dl_task_is_earliest_deadline(task, later_rq)) {
2321                         /*
2322                          * Target rq has tasks of equal or earlier deadline;
2323                          * retrying does not release any lock and is unlikely
2324                          * to yield a different result.
2325                          */
2326                         later_rq = NULL;
2327                         break;
2328                 }
2329 
2330                 /* Retry if something changed. */
2331                 if (double_lock_balance(rq, later_rq)) {
2332                         if (unlikely(task_rq(task) != rq ||
2333                                      !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
2334                                      task_on_cpu(rq, task) ||
2335                                      !dl_task(task) ||
2336                                      is_migration_disabled(task) ||
2337                                      !task_on_rq_queued(task))) {
2338                                 double_unlock_balance(rq, later_rq);
2339                                 later_rq = NULL;
2340                                 break;
2341                         }
2342                 }
2343 
2344                 /*
2345                  * If the rq we found has no -deadline task, or
2346                  * its earliest one has a later deadline than our
2347                  * task, the rq is a good one.
2348                  */
2349                 if (dl_task_is_earliest_deadline(task, later_rq))
2350                         break;
2351 
2352                 /* Otherwise we try again. */
2353                 double_unlock_balance(rq, later_rq);
2354                 later_rq = NULL;
2355         }
2356 
2357         return later_rq;
2358 }
2359 
2360 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2361 {
2362         struct task_struct *p;
2363 
2364         if (!has_pushable_dl_tasks(rq))
2365                 return NULL;
2366 
2367         p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
2368 
2369         WARN_ON_ONCE(rq->cpu != task_cpu(p));
2370         WARN_ON_ONCE(task_current(rq, p));
2371         WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
2372 
2373         WARN_ON_ONCE(!task_on_rq_queued(p));
2374         WARN_ON_ONCE(!dl_task(p));
2375 
2376         return p;
2377 }
2378 
2379 /*
2380  * See if the non-running -deadline tasks on this rq
2381  * can be sent to some other CPU where they can preempt
2382  * and start executing.
2383  */
2384 static int push_dl_task(struct rq *rq)
2385 {
2386         struct task_struct *next_task;
2387         struct rq *later_rq;
2388         int ret = 0;
2389 
2390         next_task = pick_next_pushable_dl_task(rq);
2391         if (!next_task)
2392                 return 0;
2393 
2394 retry:
2395         /*
2396          * If next_task preempts rq->curr, and rq->curr
2397          * can move away, it makes sense to just reschedule
2398          * without going further in pushing next_task.
2399          */
2400         if (dl_task(rq->curr) &&
2401             dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2402             rq->curr->nr_cpus_allowed > 1) {
2403                 resched_curr(rq);
2404                 return 0;
2405         }
2406 
2407         if (is_migration_disabled(next_task))
2408                 return 0;
2409 
2410         if (WARN_ON(next_task == rq->curr))
2411                 return 0;
2412 
2413         /* We might release rq lock */
2414         get_task_struct(next_task);
2415 
2416         /* Will lock the rq it'll find */
2417         later_rq = find_lock_later_rq(next_task, rq);
2418         if (!later_rq) {
2419                 struct task_struct *task;
2420 
2421                 /*
2422                  * We must check all this again, since
2423                  * find_lock_later_rq releases rq->lock and it is
2424                  * then possible that next_task has migrated.
2425                  */
2426                 task = pick_next_pushable_dl_task(rq);
2427                 if (task == next_task) {
2428                         /*
2429                          * The task is still there. We don't try
2430                          * again; some other CPU will pull it when ready.
2431                          */
2432                         goto out;
2433                 }
2434 
2435                 if (!task)
2436                         /* No more tasks */
2437                         goto out;
2438 
2439                 put_task_struct(next_task);
2440                 next_task = task;
2441                 goto retry;
2442         }
2443 
2444         deactivate_task(rq, next_task, 0);
2445         set_task_cpu(next_task, later_rq->cpu);
2446         activate_task(later_rq, next_task, 0);
2447         ret = 1;
2448 
2449         resched_curr(later_rq);
2450 
2451         double_unlock_balance(rq, later_rq);
2452 
2453 out:
2454         put_task_struct(next_task);
2455 
2456         return ret;
2457 }
2458 
2459 static void push_dl_tasks(struct rq *rq)
2460 {
2461         /* push_dl_task() will return true if it moved a -deadline task */
2462         while (push_dl_task(rq))
2463                 ;
2464 }
2465 
2466 static void pull_dl_task(struct rq *this_rq)
2467 {
2468         int this_cpu = this_rq->cpu, cpu;
2469         struct task_struct *p, *push_task;
2470         bool resched = false;
2471         struct rq *src_rq;
2472         u64 dmin = LONG_MAX;
2473 
2474         if (likely(!dl_overloaded(this_rq)))
2475                 return;
2476 
2477         /*
2478          * Match the barrier from dl_set_overload(); this guarantees that if we
2479          * see overloaded we must also see the dlo_mask bit.
2480          */
2481         smp_rmb();
2482 
2483         for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2484                 if (this_cpu == cpu)
2485                         continue;
2486 
2487                 src_rq = cpu_rq(cpu);
2488 
2489                 /*
2490                  * It looks racy, and it is! However, as in rt.c,
2491                  * we are fine with this.
2492                  */
2493                 if (this_rq->dl.dl_nr_running &&
2494                     dl_time_before(this_rq->dl.earliest_dl.curr,
2495                                    src_rq->dl.earliest_dl.next))
2496                         continue;
2497 
2498                 /* Might drop this_rq->lock */
2499                 push_task = NULL;
2500                 double_lock_balance(this_rq, src_rq);
2501 
2502                 /*
2503                  * If there are no more pullable tasks on the
2504                  * rq, we're done with it.
2505                  */
2506                 if (src_rq->dl.dl_nr_running <= 1)
2507                         goto skip;
2508 
2509                 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2510 
2511                 /*
2512                  * We found a task to be pulled if:
2513                  *  - it preempts our current (if there's one),
2514                  *  - it will preempt the last one we pulled (if any).
2515                  */
2516                 if (p && dl_time_before(p->dl.deadline, dmin) &&
2517                     dl_task_is_earliest_deadline(p, this_rq)) {
2518                         WARN_ON(p == src_rq->curr);
2519                         WARN_ON(!task_on_rq_queued(p));
2520 
2521                         /*
2522                          * Then we pull iff p has actually an earlier
2523                          * deadline than the current task of its runqueue.
2524                          */
2525                         if (dl_time_before(p->dl.deadline,
2526                                            src_rq->curr->dl.deadline))
2527                                 goto skip;
2528 
2529                         if (is_migration_disabled(p)) {
2530                                 push_task = get_push_task(src_rq);
2531                         } else {
2532                                 deactivate_task(src_rq, p, 0);
2533                                 set_task_cpu(p, this_cpu);
2534                                 activate_task(this_rq, p, 0);
2535                                 dmin = p->dl.deadline;
2536                                 resched = true;
2537                         }
2538 
2539                         /* Is there any other task even earlier? */
2540                 }
2541 skip:
2542                 double_unlock_balance(this_rq, src_rq);
2543 
2544                 if (push_task) {
2545                         preempt_disable();
2546                         raw_spin_rq_unlock(this_rq);
2547                         stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2548                                             push_task, &src_rq->push_work);
2549                         preempt_enable();
2550                         raw_spin_rq_lock(this_rq);
2551                 }
2552         }
2553 
2554         if (resched)
2555                 resched_curr(this_rq);
2556 }
2557 
2558 /*
2559  * Since the task is not running and a reschedule is not going to happen
2560  * anytime soon on its runqueue, we try pushing it away now.
2561  */
2562 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2563 {
2564         if (!task_on_cpu(rq, p) &&
2565             !test_tsk_need_resched(rq->curr) &&
2566             p->nr_cpus_allowed > 1 &&
2567             dl_task(rq->curr) &&
2568             (rq->curr->nr_cpus_allowed < 2 ||
2569              !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2570                 push_dl_tasks(rq);
2571         }
2572 }
2573 
2574 static void set_cpus_allowed_dl(struct task_struct *p,
2575                                 struct affinity_context *ctx)
2576 {
2577         struct root_domain *src_rd;
2578         struct rq *rq;
2579 
2580         WARN_ON_ONCE(!dl_task(p));
2581 
2582         rq = task_rq(p);
2583         src_rd = rq->rd;
2584         /*
2585          * Migrating a SCHED_DEADLINE task between exclusive
2586          * cpusets (different root_domains) entails a bandwidth
2587          * update. We already made space for us in the destination
2588          * domain (see cpuset_can_attach()).
2589          */
2590         if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
2591                 struct dl_bw *src_dl_b;
2592 
2593                 src_dl_b = dl_bw_of(cpu_of(rq));
2594                 /*
2595                  * We now free resources of the root_domain we are migrating
2596                  * off. In the worst case, sched_setattr() may temporarily fail
2597                  * until we complete the update.
2598                  */
2599                 raw_spin_lock(&src_dl_b->lock);
2600                 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2601                 raw_spin_unlock(&src_dl_b->lock);
2602         }
2603 
2604         set_cpus_allowed_common(p, ctx);
2605 }
2606 
2607 /* Assumes rq->lock is held */
2608 static void rq_online_dl(struct rq *rq)
2609 {
2610         if (rq->dl.overloaded)
2611                 dl_set_overload(rq);
2612 
2613         cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2614         if (rq->dl.dl_nr_running > 0)
2615                 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2616 }
2617 
2618 /* Assumes rq->lock is held */
2619 static void rq_offline_dl(struct rq *rq)
2620 {
2621         if (rq->dl.overloaded)
2622                 dl_clear_overload(rq);
2623 
2624         cpudl_clear(&rq->rd->cpudl, rq->cpu);
2625         cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2626 }
2627 
2628 void __init init_sched_dl_class(void)
2629 {
2630         unsigned int i;
2631 
2632         for_each_possible_cpu(i)
2633                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2634                                         GFP_KERNEL, cpu_to_node(i));
2635 }
2636 
2637 void dl_add_task_root_domain(struct task_struct *p)
2638 {
2639         struct rq_flags rf;
2640         struct rq *rq;
2641         struct dl_bw *dl_b;
2642 
2643         raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2644         if (!dl_task(p)) {
2645                 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2646                 return;
2647         }
2648 
2649         rq = __task_rq_lock(p, &rf);
2650 
2651         dl_b = &rq->rd->dl_bw;
2652         raw_spin_lock(&dl_b->lock);
2653 
2654         __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2655 
2656         raw_spin_unlock(&dl_b->lock);
2657 
2658         task_rq_unlock(rq, p, &rf);
2659 }
2660 
2661 void dl_clear_root_domain(struct root_domain *rd)
2662 {
2663         unsigned long flags;
2664 
2665         raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2666         rd->dl_bw.total_bw = 0;
2667         raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2668 }
2669 
2670 #endif /* CONFIG_SMP */
2671 
2672 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2673 {
2674         /*
2675          * task_non_contending() can start the "inactive timer" (if the 0-lag
2676          * time is in the future). If the task switches back to dl before
2677          * the "inactive timer" fires, it can continue to consume its current
2678          * runtime using its current deadline. If it stays outside of
2679          * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2680          * will reset the task parameters.
2681          */
2682         if (task_on_rq_queued(p) && p->dl.dl_runtime)
2683                 task_non_contending(&p->dl);
2684 
2685         /*
2686          * In case a task is setscheduled out from SCHED_DEADLINE we need to
2687          * keep track of that on its cpuset (for correct bandwidth tracking).
2688          */
2689         dec_dl_tasks_cs(p);
2690 
2691         if (!task_on_rq_queued(p)) {
2692                 /*
2693                  * Inactive timer is armed. However, p is leaving DEADLINE and
2694                  * might migrate away from this rq while continuing to run on
2695                  * some other class. We need to remove its contribution from
2696                  * this rq running_bw now, or sub_rq_bw (below) will complain.
2697                  */
2698                 if (p->dl.dl_non_contending)
2699                         sub_running_bw(&p->dl, &rq->dl);
2700                 sub_rq_bw(&p->dl, &rq->dl);
2701         }
2702 
2703         /*
2704          * We cannot use inactive_task_timer() to invoke sub_running_bw()
2705          * at the 0-lag time, because the task could have been migrated
2706          * while it was SCHED_OTHER in the meantime.
2707          */
2708         if (p->dl.dl_non_contending)
2709                 p->dl.dl_non_contending = 0;
2710 
2711         /*
2712          * Since this might be the only -deadline task on the rq,
2713          * this is the right place to try to pull some other one
2714          * from an overloaded CPU, if any.
2715          */
2716         if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2717                 return;
2718 
2719         deadline_queue_pull_task(rq);
2720 }
2721 
2722 /*
2723  * When switching to -deadline, we may overload the rq, then
2724  * we try to push someone off, if possible.
2725  */
2726 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2727 {
2728         if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2729                 put_task_struct(p);
2730 
2731         /*
2732          * In case a task is setscheduled to SCHED_DEADLINE we need to keep
2733          * track of that on its cpuset (for correct bandwidth tracking).
2734          */
2735         inc_dl_tasks_cs(p);
2736 
2737         /* If p is not queued we will update its parameters at next wakeup. */
2738         if (!task_on_rq_queued(p)) {
2739                 add_rq_bw(&p->dl, &rq->dl);
2740 
2741                 return;
2742         }
2743 
2744         if (rq->curr != p) {
2745 #ifdef CONFIG_SMP
2746                 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2747                         deadline_queue_push_tasks(rq);
2748 #endif
2749                 if (dl_task(rq->curr))
2750                         wakeup_preempt_dl(rq, p, 0);
2751                 else
2752                         resched_curr(rq);
2753         } else {
2754                 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2755         }
2756 }
2757 
2758 /*
2759  * If the scheduling parameters of a -deadline task changed,
2760  * a push or pull operation might be needed.
2761  */
2762 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2763                             int oldprio)
2764 {
2765         if (!task_on_rq_queued(p))
2766                 return;
2767 
2768 #ifdef CONFIG_SMP
2769         /*
2770          * This might be too much, but unfortunately
2771          * we don't have the old deadline value, and
2772          * we can't tell whether the task is raising
2773          * or lowering its prio, so...
2774          */
2775         if (!rq->dl.overloaded)
2776                 deadline_queue_pull_task(rq);
2777 
2778         if (task_current(rq, p)) {
2779                 /*
2780                  * If we now have an earlier deadline task than p,
2781                  * then reschedule, provided p is still on this
2782                  * runqueue.
2783                  */
2784                 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2785                         resched_curr(rq);
2786         } else {
2787                 /*
2788                  * Current may not be deadline in case p was throttled but we
2789                  * have just replenished it (e.g. rt_mutex_setprio()).
2790                  *
2791                  * Otherwise, if p was given an earlier deadline, reschedule.
2792                  */
2793                 if (!dl_task(rq->curr) ||
2794                     dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
2795                         resched_curr(rq);
2796         }
2797 #else
2798         /*
2799          * We don't know if p has an earlier or later deadline, so let's blindly
2800          * set a (maybe not needed) rescheduling point.
2801          */
2802         resched_curr(rq);
2803 #endif
2804 }
2805 
2806 #ifdef CONFIG_SCHED_CORE
2807 static int task_is_throttled_dl(struct task_struct *p, int cpu)
2808 {
2809         return p->dl.dl_throttled;
2810 }
2811 #endif
2812 
2813 DEFINE_SCHED_CLASS(dl) = {
2814 
2815         .enqueue_task           = enqueue_task_dl,
2816         .dequeue_task           = dequeue_task_dl,
2817         .yield_task             = yield_task_dl,
2818 
2819         .wakeup_preempt         = wakeup_preempt_dl,
2820 
2821         .pick_next_task         = pick_next_task_dl,
2822         .put_prev_task          = put_prev_task_dl,
2823         .set_next_task          = set_next_task_dl,
2824 
2825 #ifdef CONFIG_SMP
2826         .balance                = balance_dl,
2827         .pick_task              = pick_task_dl,
2828         .select_task_rq         = select_task_rq_dl,
2829         .migrate_task_rq        = migrate_task_rq_dl,
2830         .set_cpus_allowed       = set_cpus_allowed_dl,
2831         .rq_online              = rq_online_dl,
2832         .rq_offline             = rq_offline_dl,
2833         .task_woken             = task_woken_dl,
2834         .find_lock_rq           = find_lock_later_rq,
2835 #endif
2836 
2837         .task_tick              = task_tick_dl,
2838         .task_fork              = task_fork_dl,
2839 
2840         .prio_changed           = prio_changed_dl,
2841         .switched_from          = switched_from_dl,
2842         .switched_to            = switched_to_dl,
2843 
2844         .update_curr            = update_curr_dl,
2845 #ifdef CONFIG_SCHED_CORE
2846         .task_is_throttled      = task_is_throttled_dl,
2847 #endif
2848 };
2849 
2850 /* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
2851 static u64 dl_generation;
2852 
2853 int sched_dl_global_validate(void)
2854 {
2855         u64 runtime = global_rt_runtime();
2856         u64 period = global_rt_period();
2857         u64 new_bw = to_ratio(period, runtime);
2858         u64 gen = ++dl_generation;
2859         struct dl_bw *dl_b;
2860         int cpu, cpus, ret = 0;
2861         unsigned long flags;
2862 
2863         /*
2864          * Here we want to check that the bandwidth is not being set to a
2865          * value smaller than the currently allocated bandwidth in
2866          * any of the root_domains.
2867          */
2868         for_each_possible_cpu(cpu) {
2869                 rcu_read_lock_sched();
2870 
2871                 if (dl_bw_visited(cpu, gen))
2872                         goto next;
2873 
2874                 dl_b = dl_bw_of(cpu);
2875                 cpus = dl_bw_cpus(cpu);
2876 
2877                 raw_spin_lock_irqsave(&dl_b->lock, flags);
2878                 if (new_bw * cpus < dl_b->total_bw)
2879                         ret = -EBUSY;
2880                 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2881 
2882 next:
2883                 rcu_read_unlock_sched();
2884 
2885                 if (ret)
2886                         break;
2887         }
2888 
2889         return ret;
2890 }
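
A worked example of the check above, using the default sysctl values
(illustrative): sched_rt_period_us = 1000000 and sched_rt_runtime_us = 950000
give new_bw corresponding to 0.95 in to_ratio()'s fixed-point format, so on a
root domain spanning 4 CPUs the new setting is rejected with -EBUSY whenever
the already admitted deadline bandwidth dl_b->total_bw exceeds 4 * 0.95 = 3.8
CPUs' worth of bandwidth.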
2891 
2892 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2893 {
2894         if (global_rt_runtime() == RUNTIME_INF) {
2895                 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2896                 dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT;
2897         } else {
2898                 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2899                           global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2900                 dl_rq->max_bw = dl_rq->extra_bw =
2901                         to_ratio(global_rt_period(), global_rt_runtime());
2902         }
2903 }
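
With the same default values, max_bw = extra_bw corresponds to 0.95 (the
fraction of each CPU usable by deadline tasks), while bw_ratio corresponds to
1/0.95 ~ 1.05 in the coarser RATIO_SHIFT fixed-point format; the latter is the
inverse of max_bw, used by the GRUB reclaiming path to inflate the charged
runtime so that reclaiming cannot push the total deadline utilization past
max_bw.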
2904 
2905 void sched_dl_do_global(void)
2906 {
2907         u64 new_bw = -1;
2908         u64 gen = ++dl_generation;
2909         struct dl_bw *dl_b;
2910         int cpu;
2911         unsigned long flags;
2912 
2913         if (global_rt_runtime() != RUNTIME_INF)
2914                 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2915 
2916         for_each_possible_cpu(cpu) {
2917                 rcu_read_lock_sched();
2918 
2919                 if (dl_bw_visited(cpu, gen)) {
2920                         rcu_read_unlock_sched();
2921                         continue;
2922                 }
2923 
2924                 dl_b = dl_bw_of(cpu);
2925 
2926                 raw_spin_lock_irqsave(&dl_b->lock, flags);
2927                 dl_b->bw = new_bw;
2928                 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2929 
2930                 rcu_read_unlock_sched();
2931                 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2932         }
2933 }
2934 
2935 /*
2936  * We must be sure that accepting a new task (or allowing changing the
2937  * parameters of an existing one) is consistent with the bandwidth
2938  * constraints. If so, this function also updates the currently
2939  * allocated bandwidth accordingly, to reflect the new situation.
2940  *
2941  * This function is called while holding p's rq->lock.
2942  */
2943 int sched_dl_overflow(struct task_struct *p, int policy,
2944                       const struct sched_attr *attr)
2945 {
2946         u64 period = attr->sched_period ?: attr->sched_deadline;
2947         u64 runtime = attr->sched_runtime;
2948         u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2949         int cpus, err = -1, cpu = task_cpu(p);
2950         struct dl_bw *dl_b = dl_bw_of(cpu);
2951         unsigned long cap;
2952 
2953         if (attr->sched_flags & SCHED_FLAG_SUGOV)
2954                 return 0;
2955 
2956         /* !deadline task may carry old deadline bandwidth */
2957         if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2958                 return 0;
2959 
2960         /*
2961          * Whether a task enters, leaves, or stays -deadline but changes
2962          * its parameters, we may need to update the total allocated
2963          * bandwidth of the container accordingly.
2964          */
2965         raw_spin_lock(&dl_b->lock);
2966         cpus = dl_bw_cpus(cpu);
2967         cap = dl_bw_capacity(cpu);
2968 
2969         if (dl_policy(policy) && !task_has_dl_policy(p) &&
2970             !__dl_overflow(dl_b, cap, 0, new_bw)) {
2971                 if (hrtimer_active(&p->dl.inactive_timer))
2972                         __dl_sub(dl_b, p->dl.dl_bw, cpus);
2973                 __dl_add(dl_b, new_bw, cpus);
2974                 err = 0;
2975         } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2976                    !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
2977                 /*
2978                  * XXX this is slightly incorrect: when the task
2979                  * utilization decreases, we should delay the total
2980                  * utilization change until the task's 0-lag point.
2981                  * But this would require setting the task's "inactive
2982                  * timer" while the task is not inactive.
2983                  */
2984                 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2985                 __dl_add(dl_b, new_bw, cpus);
2986                 dl_change_utilization(p, new_bw);
2987                 err = 0;
2988         } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2989                 /*
2990                  * Do not decrease the total deadline utilization here,
2991                  * switched_from_dl() will take care to do it at the correct
2992                  * (0-lag) time.
2993                  */
2994                 err = 0;
2995         }
2996         raw_spin_unlock(&dl_b->lock);
2997 
2998         return err;
2999 }
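
The overflow test itself lives in __dl_overflow() (kernel/sched/sched.h). Below is a rough standalone sketch of that admission check, under the assumption that it scales the domain-wide bandwidth limit by the summed CPU capacity (cap_scale()) and compares the result with the already allocated bandwidth plus the requested change; the numbers in main() are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT              20
#define SCHED_CAPACITY_SHIFT  10

/* runtime/period in BW_SHIFT fixed point */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
        return (runtime << BW_SHIFT) / period;
}

/*
 * Would admitting a change from old_bw to new_bw overflow a domain whose
 * per-CPU bandwidth limit is max_bw, whose summed CPU capacity is cap
 * (1024 per full-capacity CPU) and which already has total_bw allocated?
 */
static bool dl_would_overflow(uint64_t max_bw, uint64_t cap,
                              uint64_t total_bw, uint64_t old_bw,
                              uint64_t new_bw)
{
        return ((max_bw * cap) >> SCHED_CAPACITY_SHIFT) <
               total_bw - old_bw + new_bw;
}

int main(void)
{
        uint64_t max_bw   = to_ratio(1000000, 950000); /* default 95% limit */
        uint64_t cap      = 4 * 1024;                  /* 4 full-capacity CPUs */
        uint64_t task_bw  = to_ratio(100000, 30000);   /* 30ms every 100ms */
        uint64_t total_bw = 0;
        int admitted = 0;

        while (!dl_would_overflow(max_bw, cap, total_bw, 0, task_bw)) {
                total_bw += task_bw;
                admitted++;
        }
        /* With these numbers, 12 such tasks fit: 12 * 0.3 <= 4 * 0.95. */
        printf("admitted %d tasks\n", admitted);
        return 0;
}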
3000 
3001 /*
3002  * This function initializes the sched_dl_entity of a task that is
3003  * becoming a SCHED_DEADLINE task.
3004  *
3005  * Only the static values are considered here, the actual runtime and the
3006  * absolute deadline will be properly calculated when the task is enqueued
3007  * for the first time with its new policy.
3008  */
3009 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3010 {
3011         struct sched_dl_entity *dl_se = &p->dl;
3012 
3013         dl_se->dl_runtime = attr->sched_runtime;
3014         dl_se->dl_deadline = attr->sched_deadline;
3015         dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
3016         dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
3017         dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3018         dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
3019 }
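
As a worked example of the two ratios stored above (a standalone sketch, again assuming BW_SHIFT == 20): a task with 10ms runtime, 30ms deadline and 100ms period ends up with dl_bw == runtime/period (~0.10 in fixed point) and dl_density == runtime/deadline (~0.33 in fixed point):

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
        return (runtime << BW_SHIFT) / period;
}

int main(void)
{
        uint64_t runtime  =  10 * 1000 * 1000ULL;  /* 10ms in ns  */
        uint64_t deadline =  30 * 1000 * 1000ULL;  /* 30ms in ns  */
        uint64_t period   = 100 * 1000 * 1000ULL;  /* 100ms in ns */

        /* dl_bw: runtime/period, charged against the root domain */
        printf("dl_bw      = %llu\n",
               (unsigned long long)to_ratio(period, runtime));
        /* dl_density: runtime/deadline, used by the CBS wakeup check */
        printf("dl_density = %llu\n",
               (unsigned long long)to_ratio(deadline, runtime));
        return 0;
}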
3020 
3021 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
3022 {
3023         struct sched_dl_entity *dl_se = &p->dl;
3024 
3025         attr->sched_priority = p->rt_priority;
3026         attr->sched_runtime = dl_se->dl_runtime;
3027         attr->sched_deadline = dl_se->dl_deadline;
3028         attr->sched_period = dl_se->dl_period;
3029         attr->sched_flags &= ~SCHED_DL_FLAGS;
3030         attr->sched_flags |= dl_se->flags;
3031 }
3032 
3033 /*
3034  * This function validates the new parameters of a -deadline task.
3035  * We require the deadline to be non-zero and greater than or equal
3036  * to the runtime, and the period to be either zero or greater than
3037  * or equal to the deadline. Furthermore, we have to be sure that
3038  * user parameters are above the internal resolution of 1us (we
3039  * check sched_runtime only since it is always the smallest one) and
3040  * below 2^63 ns (we have to check both sched_deadline and
3041  * sched_period, as the latter can be zero).
3042  */
3043 bool __checkparam_dl(const struct sched_attr *attr)
3044 {
3045         u64 period, max, min;
3046 
3047         /* special dl tasks don't actually use any parameter */
3048         if (attr->sched_flags & SCHED_FLAG_SUGOV)
3049                 return true;
3050 
3051         /* deadline != 0 */
3052         if (attr->sched_deadline == 0)
3053                 return false;
3054 
3055         /*
3056          * Since we truncate DL_SCALE bits, make sure we're at least
3057          * that big.
3058          */
3059         if (attr->sched_runtime < (1ULL << DL_SCALE))
3060                 return false;
3061 
3062         /*
3063          * Since we use the MSB for wrap-around and sign issues, make
3064          * sure it's not set (mind that period can be equal to zero).
3065          */
3066         if (attr->sched_deadline & (1ULL << 63) ||
3067             attr->sched_period & (1ULL << 63))
3068                 return false;
3069 
3070         period = attr->sched_period;
3071         if (!period)
3072                 period = attr->sched_deadline;
3073 
3074         /* runtime <= deadline <= period (if period != 0) */
3075         if (period < attr->sched_deadline ||
3076             attr->sched_deadline < attr->sched_runtime)
3077                 return false;
3078 
3079         max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
3080         min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
3081 
3082         if (period < min || period > max)
3083                 return false;
3084 
3085         return true;
3086 }
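
From userspace, parameters that satisfy these checks are requested through the sched_setattr() syscall. A hedged example follows: glibc provides no wrapper, so the raw syscall is used, SYS_sched_setattr is assumed to be exposed by <sys/syscall.h>, and struct sched_attr is declared locally following the layout in include/uapi/linux/sched/types.h (runtime <= deadline <= period, all in nanoseconds and within the sysctl period window):

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

struct sched_attr {
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        uint64_t sched_runtime;
        uint64_t sched_deadline;
        uint64_t sched_period;
};

int main(void)
{
        struct sched_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size           = sizeof(attr);
        attr.sched_policy   = SCHED_DEADLINE;
        attr.sched_runtime  =  10 * 1000 * 1000;   /* 10ms  */
        attr.sched_deadline =  30 * 1000 * 1000;   /* 30ms  */
        attr.sched_period   = 100 * 1000 * 1000;   /* 100ms */

        /*
         * Setting SCHED_DEADLINE typically needs privileges (CAP_SYS_NICE).
         * EINVAL suggests __checkparam_dl() rejected the parameters;
         * EBUSY suggests the bandwidth admission test failed.
         */
        if (syscall(SYS_sched_setattr, 0, &attr, 0))
                perror("sched_setattr");
        return 0;
}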
3087 
3088 /*
3089  * This function clears the sched_dl_entity static params.
3090  */
3091 static void __dl_clear_params(struct sched_dl_entity *dl_se)
3092 {
3093         dl_se->dl_runtime               = 0;
3094         dl_se->dl_deadline              = 0;
3095         dl_se->dl_period                = 0;
3096         dl_se->flags                    = 0;
3097         dl_se->dl_bw                    = 0;
3098         dl_se->dl_density               = 0;
3099 
3100         dl_se->dl_throttled             = 0;
3101         dl_se->dl_yielded               = 0;
3102         dl_se->dl_non_contending        = 0;
3103         dl_se->dl_overrun               = 0;
3104         dl_se->dl_server                = 0;
3105 
3106 #ifdef CONFIG_RT_MUTEXES
3107         dl_se->pi_se                    = dl_se;
3108 #endif
3109 }
3110 
3111 void init_dl_entity(struct sched_dl_entity *dl_se)
3112 {
3113         RB_CLEAR_NODE(&dl_se->rb_node);
3114         init_dl_task_timer(dl_se);
3115         init_dl_inactive_task_timer(dl_se);
3116         __dl_clear_params(dl_se);
3117 }
3118 
3119 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
3120 {
3121         struct sched_dl_entity *dl_se = &p->dl;
3122 
3123         if (dl_se->dl_runtime != attr->sched_runtime ||
3124             dl_se->dl_deadline != attr->sched_deadline ||
3125             dl_se->dl_period != attr->sched_period ||
3126             dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
3127                 return true;
3128 
3129         return false;
3130 }
3131 
3132 #ifdef CONFIG_SMP
3133 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
3134                                  const struct cpumask *trial)
3135 {
3136         unsigned long flags, cap;
3137         struct dl_bw *cur_dl_b;
3138         int ret = 1;
3139 
3140         rcu_read_lock_sched();
3141         cur_dl_b = dl_bw_of(cpumask_any(cur));
3142         cap = __dl_bw_capacity(trial);
3143         raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
3144         if (__dl_overflow(cur_dl_b, cap, 0, 0))
3145                 ret = 0;
3146         raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
3147         rcu_read_unlock_sched();
3148 
3149         return ret;
3150 }
3151 
3152 enum dl_bw_request {
3153         dl_bw_req_check_overflow = 0,
3154         dl_bw_req_alloc,
3155         dl_bw_req_free
3156 };
3157 
3158 static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
3159 {
3160         unsigned long flags;
3161         struct dl_bw *dl_b;
3162         bool overflow = 0;
3163 
3164         rcu_read_lock_sched();
3165         dl_b = dl_bw_of(cpu);
3166         raw_spin_lock_irqsave(&dl_b->lock, flags);
3167 
3168         if (req == dl_bw_req_free) {
3169                 __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
3170         } else {
3171                 unsigned long cap = dl_bw_capacity(cpu);
3172 
3173                 overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
3174 
3175                 if (req == dl_bw_req_alloc && !overflow) {
3176                         /*
3177                          * We reserve space in the destination
3178                          * root_domain, as we can't fail after this point.
3179                          * We will free resources in the source root_domain
3180                          * later on (see set_cpus_allowed_dl()).
3181                          */
3182                         __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
3183                 }
3184         }
3185 
3186         raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3187         rcu_read_unlock_sched();
3188 
3189         return overflow ? -EBUSY : 0;
3190 }
3191 
3192 int dl_bw_check_overflow(int cpu)
3193 {
3194         return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
3195 }
3196 
3197 int dl_bw_alloc(int cpu, u64 dl_bw)
3198 {
3199         return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
3200 }
3201 
3202 void dl_bw_free(int cpu, u64 dl_bw)
3203 {
3204         dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
3205 }
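
A hypothetical caller-side sketch of how dl_bw_alloc()/dl_bw_free() are meant to pair up, mirroring the reserve-then-roll-back pattern described in the comment inside dl_bw_manage(); example_move_dl_task() and do_rest_of_the_move() are made-up names, not functions in this file:

static int example_move_dl_task(struct task_struct *p, int dst_cpu)
{
        u64 bw = p->dl.dl_bw;
        int ret;

        /* Reserve the task's bandwidth on the destination root domain. */
        ret = dl_bw_alloc(dst_cpu, bw);
        if (ret)
                return ret;                     /* -EBUSY: would overflow */

        /* Hypothetical follow-up step that may still fail. */
        ret = do_rest_of_the_move(p, dst_cpu);
        if (ret)
                dl_bw_free(dst_cpu, bw);        /* roll the reservation back */

        return ret;
}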
3206 #endif
3207 
3208 #ifdef CONFIG_SCHED_DEBUG
3209 void print_dl_stats(struct seq_file *m, int cpu)
3210 {
3211         print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
3212 }
3213 #endif /* CONFIG_SCHED_DEBUG */
3214 
